diff --git a/sdk/batch/azure-batch/CHANGELOG.md b/sdk/batch/azure-batch/CHANGELOG.md index c188dce064dc..32f84aaef17f 100644 --- a/sdk/batch/azure-batch/CHANGELOG.md +++ b/sdk/batch/azure-batch/CHANGELOG.md @@ -1,5 +1,11 @@ # Release History +## 15.1.0 (2026-03-06) + +### Other Changes + +- This is the GA release of the features introduced in the 15.0.0 and 15.1.0 beta versions, including LRO support, job-level FIFO scheduling, CMK support on pools, IPv6 support, metadata security protocol support, IP tag support, and confidential VM enhancements. No additional changes were made from the last beta release. + ## 15.1.0b3 (2026-02-05) ### Other Changes diff --git a/sdk/batch/azure-batch/README.md b/sdk/batch/azure-batch/README.md index a6fc46bb9083..266c07fed26c 100644 --- a/sdk/batch/azure-batch/README.md +++ b/sdk/batch/azure-batch/README.md @@ -22,7 +22,7 @@ pip install azure-batch azure-identity ### Prerequisites * An Azure subscription. If you don't have one, [create an account for free][azure_sub] * A [Batch account][azure_batch] with a linked [Storage account][azure_storage] -* Python 3.9 or later. For more details, please see the [Azure SDK for Python version support policy](https://github.com/Azure/azure-sdk-for-python/blob/main/doc/python_version_support_policy.md) +* Python 3.9 or later. 
### Authenticate the client @@ -391,7 +391,7 @@ task1 = models.BatchTaskCreateOptions(id="task1", command_line='cmd /c "echo hel task2 = models.BatchTaskCreateOptions(id="task2", command_line='cmd /c "echo hello world"') task3 = models.BatchTaskCreateOptions(id="task3", command_line='cmd /c "echo hello world"') -task_group = models.BatchTaskGroup(values_property=[task1, task2, task3]) +task_group = models.BatchTaskGroup(task_values=[task1, task2, task3]) result = client.create_task_collection(job_id="my-job", task_collection=task_group) ``` diff --git a/sdk/batch/azure-batch/_metadata.json b/sdk/batch/azure-batch/_metadata.json new file mode 100644 index 000000000000..05e6b511e7bd --- /dev/null +++ b/sdk/batch/azure-batch/_metadata.json @@ -0,0 +1,6 @@ +{ + "apiVersion": "2025-06-01", + "apiVersions": { + "Azure.Batch": "2025-06-01" + } +} \ No newline at end of file diff --git a/sdk/batch/azure-batch/apiview-properties.json b/sdk/batch/azure-batch/apiview-properties.json index 47ff5e99e5c1..53c56cc69577 100644 --- a/sdk/batch/azure-batch/apiview-properties.json +++ b/sdk/batch/azure-batch/apiview-properties.json @@ -59,7 +59,7 @@ "azure.batch.models.BatchNodeRemoteLoginSettings": "Azure.Batch.BatchNodeRemoteLoginSettings", "azure.batch.models.BatchNodeRemoveOptions": "Azure.Batch.BatchNodeRemoveOptions", "azure.batch.models.BatchNodeUserCreateOptions": "Azure.Batch.BatchNodeUserCreateOptions", - "azure.batch.models.BatchNodeUserUpdateOptions": "Azure.Batch.BatchNodeUserUpdateOptions", + "azure.batch.models.BatchNodeUserReplaceOptions": "Azure.Batch.BatchNodeUserUpdateOptions", "azure.batch.models.BatchNodeVMExtension": "Azure.Batch.BatchNodeVMExtension", "azure.batch.models.BatchOsDisk": "Azure.Batch.BatchOsDisk", "azure.batch.models.BatchPool": "Azure.Batch.BatchPool", diff --git a/sdk/batch/azure-batch/azure/batch/_client.py b/sdk/batch/azure-batch/azure/batch/_client.py index d02fbf187756..7bfb2ec83c25 100644 --- a/sdk/batch/azure-batch/azure/batch/_client.py 
+++ b/sdk/batch/azure-batch/azure/batch/_client.py @@ -31,8 +31,9 @@ class BatchClient(_BatchClientOperationsMixin): :type endpoint: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2025-06-01". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2025-06-01" + and None. Default value is "2025-06-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/batch/azure-batch/azure/batch/_configuration.py b/sdk/batch/azure-batch/azure/batch/_configuration.py index 18cdffc28af4..62f8fc9d2560 100644 --- a/sdk/batch/azure-batch/azure/batch/_configuration.py +++ b/sdk/batch/azure-batch/azure/batch/_configuration.py @@ -28,8 +28,9 @@ class BatchClientConfiguration: # pylint: disable=too-many-instance-attributes :type endpoint: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials.TokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2025-06-01". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2025-06-01" + and None. Default value is "2025-06-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py index addeaba87350..ceef9d3d52ad 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_operations.py @@ -45,8 +45,8 @@ def build_batch_list_applications_request( *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, **kwargs: Any ) -> HttpRequest: @@ -61,21 +61,25 @@ def build_batch_list_applications_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) def build_batch_get_application_request( - application_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + application_id: str, + *, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, + **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -93,12 +97,12 @@ def 
build_batch_get_application_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -106,11 +110,11 @@ def build_batch_get_application_request( def build_batch_list_pool_usage_metrics_request( # pylint: disable=name-too-long *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, - starttime: Optional[datetime.datetime] = None, - endtime: Optional[datetime.datetime] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, filter: Optional[str] = None, **kwargs: Any ) -> HttpRequest: @@ -125,27 +129,27 @@ def build_batch_list_pool_usage_metrics_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") - if starttime is not None: - _params["startTime"] = _SERIALIZER.query("starttime", starttime, "iso-8601") - if endtime is not None: - 
_params["endtime"] = _SERIALIZER.query("endtime", endtime, "iso-8601") + if start_time is not None: + _params["startTime"] = _SERIALIZER.query("start_time", start_time, "iso-8601") + if end_time is not None: + _params["endtime"] = _SERIALIZER.query("end_time", end_time, "iso-8601") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) def build_batch_create_pool_request( - *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -157,12 +161,12 @@ def build_batch_create_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -170,8 +174,8 @@ def build_batch_create_pool_request( def build_batch_list_pools_request( *, 
- timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -189,8 +193,8 @@ def build_batch_list_pools_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -201,8 +205,8 @@ def build_batch_list_pools_request( _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -211,8 +215,8 @@ def build_batch_list_pools_request( def build_batch_delete_pool_internal_request( pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -233,12 +237,12 @@ def build_batch_delete_pool_internal_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not 
None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -256,8 +260,8 @@ def build_batch_delete_pool_internal_request( def build_batch_pool_exists_request( pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -278,12 +282,12 @@ def build_batch_pool_exists_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -301,8 +305,8 @@ def build_batch_pool_exists_request( def build_batch_get_pool_request( pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: 
Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -327,16 +331,16 @@ def build_batch_get_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -355,8 +359,8 @@ def build_batch_get_pool_request( def build_batch_update_pool_request( pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -378,12 +382,12 @@ def build_batch_update_pool_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, 
"rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -400,7 +404,7 @@ def build_batch_update_pool_request( def build_batch_disable_pool_auto_scale_request( # pylint: disable=name-too-long - pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + pool_id: str, *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -416,12 +420,12 @@ def build_batch_disable_pool_auto_scale_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -429,8 +433,8 @@ def build_batch_disable_pool_auto_scale_request( # pylint: disable=name-too-lon def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = 
None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -452,12 +456,12 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -474,7 +478,7 @@ def build_batch_enable_pool_auto_scale_request( # pylint: disable=name-too-long def build_batch_evaluate_pool_auto_scale_request( # pylint: disable=name-too-long - pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + pool_id: str, *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -493,12 +497,12 @@ def build_batch_evaluate_pool_auto_scale_request( # pylint: disable=name-too-lo # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = 
_SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -508,8 +512,8 @@ def build_batch_evaluate_pool_auto_scale_request( # pylint: disable=name-too-lo def build_batch_resize_pool_internal_request( pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -531,12 +535,12 @@ def build_batch_resize_pool_internal_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -555,8 +559,8 @@ def build_batch_resize_pool_internal_request( def build_batch_stop_pool_resize_internal_request( # pylint: disable=name-too-long pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, 
if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -577,12 +581,12 @@ def build_batch_stop_pool_resize_internal_request( # pylint: disable=name-too-l # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -598,7 +602,7 @@ def build_batch_stop_pool_resize_internal_request( # pylint: disable=name-too-l def build_batch_replace_pool_properties_request( # pylint: disable=name-too-long - pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + pool_id: str, *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -615,12 +619,12 @@ def build_batch_replace_pool_properties_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if 
ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -629,8 +633,8 @@ def build_batch_replace_pool_properties_request( # pylint: disable=name-too-lon def build_batch_remove_nodes_internal_request( # pylint: disable=name-too-long pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -652,12 +656,12 @@ def build_batch_remove_nodes_internal_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -675,8 +679,8 @@ def build_batch_remove_nodes_internal_request( # pylint: disable=name-too-long def build_batch_list_supported_images_request( # pylint: disable=name-too-long *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: 
Optional[int] = None, filter: Optional[str] = None, **kwargs: Any @@ -692,16 +696,16 @@ def build_batch_list_supported_images_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -709,8 +713,8 @@ def build_batch_list_supported_images_request( # pylint: disable=name-too-long def build_batch_list_pool_node_counts_request( # pylint: disable=name-too-long *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any @@ -726,16 +730,16 @@ def build_batch_list_pool_node_counts_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") 
if filter is not None: _params["$filter"] = _SERIALIZER.query("filter", filter, "str") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -744,8 +748,8 @@ def build_batch_list_pool_node_counts_request( # pylint: disable=name-too-long def build_batch_delete_job_internal_request( job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -767,14 +771,14 @@ def build_batch_delete_job_internal_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if force is not None: _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -792,8 +796,8 @@ def build_batch_delete_job_internal_request( def build_batch_get_job_request( job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + 
service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -818,16 +822,16 @@ def build_batch_get_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -846,8 +850,8 @@ def build_batch_get_job_request( def build_batch_update_job_request( job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -869,12 +873,12 @@ def build_batch_update_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct 
headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -893,8 +897,8 @@ def build_batch_update_job_request( def build_batch_replace_job_request( job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -916,12 +920,12 @@ def build_batch_replace_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -940,8 +944,8 @@ def build_batch_replace_job_request( def build_batch_disable_job_internal_request( job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: 
Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -963,12 +967,12 @@ def build_batch_disable_job_internal_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -987,8 +991,8 @@ def build_batch_disable_job_internal_request( def build_batch_enable_job_internal_request( job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1009,12 +1013,12 @@ def build_batch_enable_job_internal_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if 
if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1032,8 +1036,8 @@ def build_batch_enable_job_internal_request( def build_batch_terminate_job_internal_request( # pylint: disable=name-too-long job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -1056,14 +1060,14 @@ def build_batch_terminate_job_internal_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if force is not None: _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -1080,7 +1084,7 @@ def build_batch_terminate_job_internal_request( # pylint: disable=name-too-long def build_batch_create_job_request( - *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1092,12 +1096,12 @@ def build_batch_create_job_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -1105,8 +1109,8 @@ def build_batch_create_job_request( def build_batch_list_jobs_request( *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -1124,8 +1128,8 @@ def build_batch_list_jobs_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -1136,8 +1140,8 @@ def build_batch_list_jobs_request( _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, 
"rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1146,8 +1150,8 @@ def build_batch_list_jobs_request( def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-long job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -1170,8 +1174,8 @@ def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -1182,8 +1186,8 @@ def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-lon _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1192,8 +1196,8 @@ def build_batch_list_jobs_from_schedule_request( # pylint: disable=name-too-lon def build_batch_list_job_preparation_and_release_task_status_request( # pylint: 
disable=name-too-long job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -1215,8 +1219,8 @@ def build_batch_list_job_preparation_and_release_task_status_request( # pylint: # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -1225,15 +1229,15 @@ def build_batch_list_job_preparation_and_release_task_status_request( # pylint: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) def build_batch_get_job_task_counts_request( - job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + job_id: str, *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1251,12 +1255,12 @@ def build_batch_get_job_task_counts_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - 
if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -1265,8 +1269,8 @@ def build_batch_get_job_task_counts_request( def build_batch_job_schedule_exists_request( job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1287,12 +1291,12 @@ def build_batch_job_schedule_exists_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1310,8 +1314,8 @@ def build_batch_job_schedule_exists_request( def build_batch_delete_job_schedule_internal_request( # pylint: disable=name-too-long job_schedule_id: str, *, - 
timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -1333,14 +1337,14 @@ def build_batch_delete_job_schedule_internal_request( # pylint: disable=name-to # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if force is not None: _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1358,8 +1362,8 @@ def build_batch_delete_job_schedule_internal_request( # pylint: disable=name-to def build_batch_get_job_schedule_request( job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -1384,16 +1388,16 @@ def build_batch_get_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + 
_params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1412,8 +1416,8 @@ def build_batch_get_job_schedule_request( def build_batch_update_job_schedule_request( job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1435,12 +1439,12 @@ def build_batch_update_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -1459,8 +1463,8 @@ def 
build_batch_update_job_schedule_request( def build_batch_replace_job_schedule_request( job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1482,12 +1486,12 @@ def build_batch_replace_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -1506,8 +1510,8 @@ def build_batch_replace_job_schedule_request( def build_batch_disable_job_schedule_request( job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1528,12 +1532,12 @@ def build_batch_disable_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + 
_params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1551,8 +1555,8 @@ def build_batch_disable_job_schedule_request( def build_batch_enable_job_schedule_request( job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1573,12 +1577,12 @@ def build_batch_enable_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1596,8 +1600,8 @@ def build_batch_enable_job_schedule_request( def build_batch_terminate_job_schedule_internal_request( # pylint: disable=name-too-long job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: 
Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -1619,14 +1623,14 @@ def build_batch_terminate_job_schedule_internal_request( # pylint: disable=name # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if force is not None: _params["force"] = _SERIALIZER.query("force", force, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1642,7 +1646,7 @@ def build_batch_terminate_job_schedule_internal_request( # pylint: disable=name def build_batch_create_job_schedule_request( - *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1654,12 +1658,12 @@ def build_batch_create_job_schedule_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct 
headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -1667,8 +1671,8 @@ def build_batch_create_job_schedule_request( def build_batch_list_job_schedules_request( *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -1686,8 +1690,8 @@ def build_batch_list_job_schedules_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -1698,15 +1702,15 @@ def build_batch_list_job_schedules_request( _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) def build_batch_create_task_request( - job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + job_id: str, *, service_timeout: Optional[int] = 
None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1723,12 +1727,12 @@ def build_batch_create_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -1737,8 +1741,8 @@ def build_batch_create_task_request( def build_batch_list_tasks_request( job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -1761,8 +1765,8 @@ def build_batch_list_tasks_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -1773,15 +1777,15 @@ def build_batch_list_tasks_request( _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", 
div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) def build_batch_create_task_collection_request( # pylint: disable=name-too-long - job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + job_id: str, *, service_timeout: Optional[int] = None, ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) @@ -1800,12 +1804,12 @@ def build_batch_create_task_collection_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -1816,8 +1820,8 @@ def build_batch_delete_task_request( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = 
None, etag: Optional[str] = None, @@ -1839,12 +1843,12 @@ def build_batch_delete_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1863,8 +1867,8 @@ def build_batch_get_task_request( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -1890,16 +1894,16 @@ def build_batch_get_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") if expand is not None: _params["$expand"] = _SERIALIZER.query("expand", expand, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, 
"rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -1919,8 +1923,8 @@ def build_batch_replace_task_request( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1943,12 +1947,12 @@ def build_batch_replace_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") @@ -1968,8 +1972,8 @@ def build_batch_list_sub_tasks_request( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> HttpRequest: @@ -1990,14 +1994,14 @@ def build_batch_list_sub_tasks_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if 
service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2007,8 +2011,8 @@ def build_batch_terminate_task_request( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -2030,12 +2034,12 @@ def build_batch_terminate_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -2054,8 +2058,8 @@ def build_batch_reactivate_task_request( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: 
Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -2077,12 +2081,12 @@ def build_batch_reactivate_task_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -2102,8 +2106,8 @@ def build_batch_delete_task_file_request( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> HttpRequest: @@ -2123,14 +2127,14 @@ def build_batch_delete_task_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if recursive is not None: _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") return 
HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) @@ -2140,8 +2144,8 @@ def build_batch_get_task_file_request( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -2165,12 +2169,12 @@ def build_batch_get_task_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -2187,8 +2191,8 @@ def build_batch_get_task_file_properties_internal_request( # pylint: disable=na task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -2209,12 +2213,12 @@ def build_batch_get_task_file_properties_internal_request( # pylint: disable=na # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + 
if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -2227,8 +2231,8 @@ def build_batch_list_task_files_request( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, @@ -2251,8 +2255,8 @@ def build_batch_list_task_files_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -2261,8 +2265,8 @@ def build_batch_list_task_files_request( _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2272,8 +2276,8 @@ def build_batch_create_node_user_request( pool_id: str, node_id: str, *, - 
timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2292,12 +2296,12 @@ def build_batch_create_node_user_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -2308,8 +2312,8 @@ def build_batch_delete_node_user_request( node_id: str, user_name: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2328,12 +2332,12 @@ def build_batch_delete_node_user_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, 
"rfc-1123") return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) @@ -2343,8 +2347,8 @@ def build_batch_replace_node_user_request( node_id: str, user_name: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2364,12 +2368,12 @@ def build_batch_replace_node_user_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) @@ -2379,8 +2383,8 @@ def build_batch_get_node_request( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> HttpRequest: @@ -2401,14 +2405,14 @@ def build_batch_get_node_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if select is not None: _params["$select"] = 
_SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2418,8 +2422,8 @@ def build_batch_reboot_node_internal_request( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2438,12 +2442,12 @@ def build_batch_reboot_node_internal_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -2453,8 +2457,8 @@ def build_batch_start_node_internal_request( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2472,12 +2476,12 @@ def 
build_batch_start_node_internal_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -2486,8 +2490,8 @@ def build_batch_reimage_node_internal_request( # pylint: disable=name-too-long pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2506,12 +2510,12 @@ def build_batch_reimage_node_internal_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -2521,8 +2525,8 @@ def build_batch_deallocate_node_internal_request( # pylint: disable=name-too-lo pool_id: str, 
node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2541,12 +2545,12 @@ def build_batch_deallocate_node_internal_request( # pylint: disable=name-too-lo # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -2556,8 +2560,8 @@ def build_batch_disable_node_scheduling_request( # pylint: disable=name-too-lon pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2576,12 +2580,12 @@ def build_batch_disable_node_scheduling_request( # pylint: disable=name-too-lon # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = 
_SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -2591,8 +2595,8 @@ def build_batch_enable_node_scheduling_request( # pylint: disable=name-too-long pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2610,12 +2614,12 @@ def build_batch_enable_node_scheduling_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) @@ -2624,8 +2628,8 @@ def build_batch_get_node_remote_login_settings_request( # pylint: disable=name- pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2645,12 +2649,12 @@ def build_batch_get_node_remote_login_settings_request( # pylint: disable=name- # Construct parameters 
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2660,8 +2664,8 @@ def build_batch_upload_node_logs_request( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -2682,12 +2686,12 @@ def build_batch_upload_node_logs_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["content-type"] = _SERIALIZER.header("content_type", content_type, "str") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -2697,8 +2701,8 @@ def build_batch_upload_node_logs_request( def build_batch_list_nodes_request( pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + 
service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -2720,8 +2724,8 @@ def build_batch_list_nodes_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -2730,8 +2734,8 @@ def build_batch_list_nodes_request( _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2742,8 +2746,8 @@ def build_batch_get_node_extension_request( node_id: str, extension_name: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> HttpRequest: @@ -2765,14 +2769,14 @@ def build_batch_get_node_extension_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", 
select, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2782,8 +2786,8 @@ def build_batch_list_node_extensions_request( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, select: Optional[list[str]] = None, **kwargs: Any @@ -2805,16 +2809,16 @@ def build_batch_list_node_extensions_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if select is not None: _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2825,8 +2829,8 @@ def build_batch_delete_node_file_request( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, recursive: 
Optional[bool] = None, **kwargs: Any ) -> HttpRequest: @@ -2846,14 +2850,14 @@ def build_batch_delete_node_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") if recursive is not None: _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) @@ -2863,8 +2867,8 @@ def build_batch_get_node_file_request( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -2888,12 +2892,12 @@ def build_batch_get_node_file_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") 
if if_unmodified_since is not None: @@ -2910,8 +2914,8 @@ def build_batch_get_node_file_properties_internal_request( # pylint: disable=na node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -2932,12 +2936,12 @@ def build_batch_get_node_file_properties_internal_request( # pylint: disable=na # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", service_timeout, "int") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") if if_modified_since is not None: _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") if if_unmodified_since is not None: @@ -2950,8 +2954,8 @@ def build_batch_list_node_files_request( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, @@ -2974,8 +2978,8 @@ def build_batch_list_node_files_request( # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if timeout is not None: - _params["timeOut"] = _SERIALIZER.query("timeout", timeout, "int") + if service_timeout is not None: + _params["timeOut"] = _SERIALIZER.query("service_timeout", 
service_timeout, "int") if max_results is not None: _params["maxresults"] = _SERIALIZER.query("max_results", max_results, "int") if filter is not None: @@ -2984,8 +2988,8 @@ def build_batch_list_node_files_request( _params["recursive"] = _SERIALIZER.query("recursive", recursive, "bool") # Construct headers - if ocpdate is not None: - _headers["ocp-date"] = _SERIALIZER.header("ocpdate", ocpdate, "rfc-1123") + if ocp_date is not None: + _headers["ocp-date"] = _SERIALIZER.header("ocp_date", ocp_date, "rfc-1123") _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) @@ -2999,27 +3003,26 @@ class _BatchClientOperationsMixin( # pylint: disable=too-many-public-methods def list_applications( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, **kwargs: Any ) -> ItemPaged["_models.BatchApplication"]: """Lists all of the applications available in the specified Account. - This operation returns only Applications and versions that are available for - use on Compute Nodes; that is, that can be used in an Package reference. For - administrator information about applications and versions that are not yet - available to Compute Nodes, use the Azure portal or the Azure Resource Manager - API. + This operation returns only Applications and versions that are available for use on Compute + Nodes; that is, that can be used in an Package reference. For administrator information about + applications and versions that are not yet available to Compute Nodes, use the Azure portal or + the Azure Resource Manager API. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -3044,8 +3047,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_applications_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, api_version=self._config.api_version, headers=_headers, @@ -3113,28 +3116,27 @@ def get_application( self, application_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchApplication: """Gets information about the specified Application. - This operation returns only Applications and versions that are available for - use on Compute Nodes; that is, that can be used in an Package reference. For - administrator information about Applications and versions that are not yet - available to Compute Nodes, use the Azure portal or the Azure Resource Manager - API. + This operation returns only Applications and versions that are available for use on Compute + Nodes; that is, that can be used in an Package reference. 
For administrator information about + Applications and versions that are not yet available to Compute Nodes, use the Azure portal or + the Azure Resource Manager API. :param application_id: The ID of the Application. Required. :type application_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchApplication. 
The BatchApplication is compatible with MutableMapping :rtype: ~azure.batch.models.BatchApplication :raises ~azure.core.exceptions.HttpResponseError: @@ -3154,8 +3156,8 @@ def get_application( _request = build_batch_get_application_request( application_id=application_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3205,45 +3207,44 @@ def get_application( def list_pool_usage_metrics( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, - starttime: Optional[datetime.datetime] = None, - endtime: Optional[datetime.datetime] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, filter: Optional[str] = None, **kwargs: Any ) -> ItemPaged["_models.BatchPoolUsageMetrics"]: - """Lists the usage metrics, aggregated by Pool across individual time intervals, - for the specified Account. + """Lists the usage metrics, aggregated by Pool across individual time intervals, for the specified + Account. - If you do not specify a $filter clause including a poolId, the response - includes all Pools that existed in the Account in the time range of the - returned aggregation intervals. If you do not specify a $filter clause - including a startTime or endTime these filters default to the start and end - times of the last aggregation interval currently available; that is, only the - last aggregation interval is returned. + If you do not specify a $filter clause including a poolId, the response includes all Pools that + existed in the Account in the time range of the returned aggregation intervals. 
If you do not + specify a $filter clause including a startTime or endTime these filters default to the start + and end times of the last aggregation interval currently available; that is, only the last + aggregation interval is returned. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int - :keyword starttime: The earliest time from which to include metrics. This must be at least two + :keyword start_time: The earliest time from which to include metrics. This must be at least two and a half hours before the current time. If not specified this defaults to the start time of the last aggregation interval currently available. Default value is None. - :paramtype starttime: ~datetime.datetime - :keyword endtime: The latest time from which to include metrics. This must be at least two + :paramtype start_time: ~datetime.datetime + :keyword end_time: The latest time from which to include metrics. This must be at least two hours before the current time. If not specified this defaults to the end time of the last aggregation interval currently available. Default value is None. 
- :paramtype endtime: ~datetime.datetime + :paramtype end_time: ~datetime.datetime :keyword filter: An OData $filter clause. For more information on constructing this filter, see `https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics `_. @@ -3270,11 +3271,11 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_usage_metrics_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, - starttime=starttime, - endtime=endtime, + start_time=start_time, + end_time=end_time, filter=filter, api_version=self._config.api_version, headers=_headers, @@ -3342,26 +3343,25 @@ def create_pool( # pylint: disable=inconsistent-return-statements self, pool: _models.BatchPoolCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Pool to the specified Account. - When naming Pools, avoid including sensitive information such as user names or - secret project names. This information may appear in telemetry logs accessible - to Microsoft Support engineers. + When naming Pools, avoid including sensitive information such as user names or secret project + names. This information may appear in telemetry logs accessible to Microsoft Support engineers. :param pool: The Pool to be created. Required. :type pool: ~azure.batch.models.BatchPoolCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -3385,8 +3385,8 @@ def create_pool( # pylint: disable=inconsistent-return-statements _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_pool_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3427,8 +3427,8 @@ def create_pool( # pylint: disable=inconsistent-return-statements def list_pools( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -3439,14 +3439,14 @@ def list_pools( Lists all of the Pools in the specified Account. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -3480,8 +3480,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pools_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -3552,8 +3552,8 @@ def _delete_pool_internal( # pylint: disable=inconsistent-return-statements self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -3562,29 +3562,27 @@ def _delete_pool_internal( # pylint: disable=inconsistent-return-statements ) -> None: """Deletes a Pool from the specified Account. - When you request that a Pool be deleted, the following actions occur: the Pool - state is set to deleting; any ongoing resize operation on the Pool are stopped; - the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks - running on existing Compute Nodes are terminated and requeued (as if a resize - Pool operation had been requested with the default requeue option); finally, - the Pool is removed from the system. Because running Tasks are requeued, the - user can rerun these Tasks by updating their Job to target a different Pool. - The Tasks can then run on the new Pool. If you want to override the requeue - behavior, then you should call resize Pool explicitly to shrink the Pool to - zero size before deleting the Pool. 
If you call an Update, Patch or Delete API - on a Pool in the deleting state, it will fail with HTTP status code 409 with - error code PoolBeingDeleted. + When you request that a Pool be deleted, the following actions occur: the Pool state is set to + deleting; any ongoing resize operation on the Pool are stopped; the Batch service starts + resizing the Pool to zero Compute Nodes; any Tasks running on existing Compute Nodes are + terminated and requeued (as if a resize Pool operation had been requested with the default + requeue option); finally, the Pool is removed from the system. Because running Tasks are + requeued, the user can rerun these Tasks by updating their Job to target a different Pool. The + Tasks can then run on the new Pool. If you want to override the requeue behavior, then you + should call resize Pool explicitly to shrink the Pool to zero size before deleting the Pool. If + you call an Update, Patch or Delete API on a Pool in the deleting state, it will fail with HTTP + status code 409 with error code PoolBeingDeleted. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -3625,8 +3623,8 @@ def _delete_pool_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_pool_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -3667,8 +3665,8 @@ def pool_exists( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -3679,14 +3677,14 @@ def pool_exists( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -3727,8 +3725,8 @@ def pool_exists( _request = build_batch_pool_exists_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -3773,8 +3771,8 @@ def get_pool( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -3787,14 +3785,14 @@ def get_pool( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -3839,8 +3837,8 @@ def get_pool( _request = build_batch_get_pool_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -3898,8 +3896,8 @@ def update_pool( # pylint: disable=inconsistent-return-statements pool_id: str, pool: _models.BatchPoolUpdateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -3908,22 +3906,22 @@ def update_pool( # pylint: disable=inconsistent-return-statements ) -> None: """Updates the properties of the specified Pool. - This only replaces the Pool properties specified in the request. For example, - if the Pool has a StartTask associated with it, and a request does not specify - a StartTask element, then the Pool keeps the existing StartTask. + This only replaces the Pool properties specified in the request. For example, if the Pool has a + StartTask associated with it, and a request does not specify a StartTask element, then the Pool + keeps the existing StartTask. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param pool: The pool properties to update. Required. :type pool: ~azure.batch.models.BatchPoolUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -3969,8 +3967,8 @@ def update_pool( # pylint: disable=inconsistent-return-statements _request = build_batch_update_pool_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4013,7 +4011,12 @@ def update_pool( # pylint: disable=inconsistent-return-statements @distributed_trace def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements - self, pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + self, + pool_id: str, + *, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, + **kwargs: Any ) -> None: """Disables automatic scaling for a Pool. @@ -4021,14 +4024,14 @@ def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements :param pool_id: The ID of the Pool on which to disable automatic scaling. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -4048,8 +4051,8 @@ def disable_pool_auto_scale( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_pool_auto_scale_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -4090,8 +4093,8 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements pool_id: str, enable_auto_scale_options: _models.BatchPoolEnableAutoScaleOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4100,25 +4103,24 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements ) -> None: """Enables automatic scaling for a Pool. - You cannot enable automatic scaling on a Pool if a resize operation is in - progress on the Pool. If automatic scaling of the Pool is currently disabled, - you must specify a valid autoscale formula as part of the request. If automatic - scaling of the Pool is already enabled, you may specify a new autoscale formula - and/or a new evaluation interval. You cannot call this API for the same Pool - more than once every 30 seconds. + You cannot enable automatic scaling on a Pool if a resize operation is in progress on the Pool. 
+ If automatic scaling of the Pool is currently disabled, you must specify a valid autoscale + formula as part of the request. If automatic scaling of the Pool is already enabled, you may + specify a new autoscale formula and/or a new evaluation interval. You cannot call this API for + the same Pool more than once every 30 seconds. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param enable_auto_scale_options: The options to use for enabling automatic scaling. Required. :type enable_auto_scale_options: ~azure.batch.models.BatchPoolEnableAutoScaleOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -4164,8 +4166,8 @@ def enable_pool_auto_scale( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_pool_auto_scale_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4212,15 +4214,15 @@ def evaluate_pool_auto_scale( pool_id: str, evaluate_auto_scale_options: _models.BatchPoolEvaluateAutoScaleOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.AutoScaleRun: """Gets the result of evaluating an automatic scaling formula on the Pool. - This API is primarily for validating an autoscale formula, as it simply returns - the result without applying the formula to the Pool. The Pool must have auto - scaling enabled in order to evaluate a formula. + This API is primarily for validating an autoscale formula, as it simply returns the result + without applying the formula to the Pool. The Pool must have auto scaling enabled in order to + evaluate a formula. :param pool_id: The ID of the Pool on which to evaluate the automatic scaling formula. Required. @@ -4228,14 +4230,14 @@ def evaluate_pool_auto_scale( :param evaluate_auto_scale_options: The options to use for evaluating the automatic scaling formula. Required. :type evaluate_auto_scale_options: ~azure.batch.models.BatchPoolEvaluateAutoScaleOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
- :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: AutoScaleRun. The AutoScaleRun is compatible with MutableMapping :rtype: ~azure.batch.models.AutoScaleRun :raises ~azure.core.exceptions.HttpResponseError: @@ -4260,8 +4262,8 @@ def evaluate_pool_auto_scale( _request = build_batch_evaluate_pool_auto_scale_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4316,8 +4318,8 @@ def _resize_pool_internal( # pylint: disable=inconsistent-return-statements pool_id: str, resize_options: _models.BatchPoolResizeOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4326,26 +4328,25 @@ def _resize_pool_internal( # pylint: disable=inconsistent-return-statements ) -> None: """Changes the number of Compute Nodes that are assigned to a Pool. - You can only resize a Pool when its allocation state is steady. If the Pool is - already resizing, the request fails with status code 409. When you resize a - Pool, the Pool's allocation state changes from steady to resizing. You cannot - resize Pools which are configured for automatic scaling. If you try to do this, - the Batch service returns an error 409. 
If you resize a Pool downwards, the - Batch service chooses which Compute Nodes to remove. To remove specific Compute - Nodes, use the Pool remove Compute Nodes API instead. + You can only resize a Pool when its allocation state is steady. If the Pool is already + resizing, the request fails with status code 409. When you resize a Pool, the Pool's allocation + state changes from steady to resizing. You cannot resize Pools which are configured for + automatic scaling. If you try to do this, the Batch service returns an error 409. If you resize + a Pool downwards, the Batch service chooses which Compute Nodes to remove. To remove specific + Compute Nodes, use the Pool remove Compute Nodes API instead. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param resize_options: The options to use for resizing the pool. Required. :type resize_options: ~azure.batch.models.BatchPoolResizeOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -4391,8 +4392,8 @@ def _resize_pool_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_resize_pool_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4438,8 +4439,8 @@ def _stop_pool_resize_internal( # pylint: disable=inconsistent-return-statement self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4448,24 +4449,23 @@ def _stop_pool_resize_internal( # pylint: disable=inconsistent-return-statement ) -> None: """Stops an ongoing resize operation on the Pool. - This does not restore the Pool to its previous state before the resize - operation: it only stops any further changes being made, and the Pool maintains - its current state. After stopping, the Pool stabilizes at the number of Compute - Nodes it was at when the stop operation was done. During the stop operation, - the Pool allocation state changes first to stopping and then to steady. A - resize operation need not be an explicit resize Pool request; this API can also - be used to halt the initial sizing of the Pool when it is created. + This does not restore the Pool to its previous state before the resize operation: it only stops + any further changes being made, and the Pool maintains its current state. After stopping, the + Pool stabilizes at the number of Compute Nodes it was at when the stop operation was done. + During the stop operation, the Pool allocation state changes first to stopping and then to + steady. 
A resize operation need not be an explicit resize Pool request; this API can also be + used to halt the initial sizing of the Pool when it is created. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -4506,8 +4506,8 @@ def _stop_pool_resize_internal( # pylint: disable=inconsistent-return-statement _request = build_batch_stop_pool_resize_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4552,28 +4552,28 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements pool_id: str, pool: _models.BatchPoolReplaceOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Updates the properties of the specified Pool. - This fully replaces all the updatable properties of the Pool. 
For example, if - the Pool has a StartTask associated with it and if StartTask is not specified - with this request, then the Batch service will remove the existing StartTask. + This fully replaces all the updatable properties of the Pool. For example, if the Pool has a + StartTask associated with it and if StartTask is not specified with this request, then the + Batch service will remove the existing StartTask. :param pool_id: The ID of the Pool to update. Required. :type pool_id: str :param pool: The options to use for replacing properties on the pool. Required. :type pool: ~azure.batch.models.BatchPoolReplaceOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -4598,8 +4598,8 @@ def replace_pool_properties( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_pool_properties_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4642,8 +4642,8 @@ def _remove_nodes_internal( # pylint: disable=inconsistent-return-statements pool_id: str, remove_options: _models.BatchNodeRemoveOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4652,22 +4652,22 @@ def _remove_nodes_internal( # pylint: disable=inconsistent-return-statements ) -> None: """Removes Compute Nodes from the specified Pool. - This operation can only run when the allocation state of the Pool is steady. - When this operation runs, the allocation state changes from steady to resizing. - Each request may remove up to 100 nodes. + This operation can only run when the allocation state of the Pool is steady. When this + operation runs, the allocation state changes from steady to resizing. Each request may remove + up to 100 nodes. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param remove_options: The options to use for removing the node. Required. :type remove_options: ~azure.batch.models.BatchNodeRemoveOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -4713,8 +4713,8 @@ def _remove_nodes_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_remove_nodes_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4759,8 +4759,8 @@ def _remove_nodes_internal( # pylint: disable=inconsistent-return-statements def list_supported_images( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any @@ -4769,14 +4769,14 @@ def list_supported_images( Lists all Virtual Machine Images supported by the Azure Batch service. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -4806,8 +4806,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_supported_images_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, api_version=self._config.api_version, @@ -4875,24 +4875,23 @@ def get_next(next_link=None): def list_pool_node_counts( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> ItemPaged["_models.BatchPoolNodeCounts"]: - """Gets the number of Compute Nodes in each state, grouped by Pool. Note that the - numbers returned may not always be up to date. If you need exact node counts, - use a list query. + """Gets the number of Compute Nodes in each state, grouped by Pool. Note that the numbers returned + may not always be up to date. If you need exact node counts, use a list query. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -4922,8 +4921,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_node_counts_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, api_version=self._config.api_version, @@ -4992,8 +4991,8 @@ def _delete_job_internal( # pylint: disable=inconsistent-return-statements self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -5003,25 +5002,24 @@ def _delete_job_internal( # pylint: disable=inconsistent-return-statements ) -> None: """Deletes a Job. - Deleting a Job also deletes all Tasks that are part of that Job, and all Job - statistics. This also overrides the retention period for Task data; that is, if - the Job contains Tasks which are still retained on Compute Nodes, the Batch - services deletes those Tasks' working directories and all their contents. When - a Delete Job request is received, the Batch service sets the Job to the - deleting state. All update operations on a Job that is in deleting state will - fail with status code 409 (Conflict), with additional information indicating - that the Job is being deleted. 
+ Deleting a Job also deletes all Tasks that are part of that Job, and all Job statistics. This + also overrides the retention period for Task data; that is, if the Job contains Tasks which are + still retained on Compute Nodes, the Batch services deletes those Tasks' working directories + and all their contents. When a Delete Job request is received, the Batch service sets the Job + to the deleting state. All update operations on a Job that is in deleting state will fail with + status code 409 (Conflict), with additional information indicating that the Job is being + deleted. :param job_id: The ID of the Job to delete. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5065,8 +5063,8 @@ def _delete_job_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -5108,8 +5106,8 @@ def get_job( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -5124,14 +5122,14 @@ def get_job( :param job_id: The ID of the Job. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5176,8 +5174,8 @@ def get_job( _request = build_batch_get_job_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -5235,8 +5233,8 @@ def update_job( # pylint: disable=inconsistent-return-statements job_id: str, job: _models.BatchJobUpdateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -5245,22 +5243,22 @@ def update_job( # pylint: disable=inconsistent-return-statements ) -> None: """Updates the properties of the specified Job. - This replaces only the Job properties specified in the request. For example, if - the Job has constraints, and a request does not specify the constraints - element, then the Job keeps the existing constraints. + This replaces only the Job properties specified in the request. For example, if the Job has + constraints, and a request does not specify the constraints element, then the Job keeps the + existing constraints. :param job_id: The ID of the Job whose properties you want to update. Required. :type job_id: str :param job: The options to use for updating the Job. Required. :type job: ~azure.batch.models.BatchJobUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -5306,8 +5304,8 @@ def update_job( # pylint: disable=inconsistent-return-statements _request = build_batch_update_job_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5354,8 +5352,8 @@ def replace_job( # pylint: disable=inconsistent-return-statements job_id: str, job: _models.BatchJob, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -5364,22 +5362,22 @@ def replace_job( # pylint: disable=inconsistent-return-statements ) -> None: """Updates the properties of the specified Job. - This fully replaces all the updatable properties of the Job. For example, if - the Job has constraints associated with it and if constraints is not specified - with this request, then the Batch service will remove the existing constraints. + This fully replaces all the updatable properties of the Job. For example, if the Job has + constraints associated with it and if constraints is not specified with this request, then the + Batch service will remove the existing constraints. 
:param job_id: The ID of the Job whose properties you want to update. Required. :type job_id: str :param job: A job with updated properties. Required. :type job: ~azure.batch.models.BatchJob - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5425,8 +5423,8 @@ def replace_job( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_job_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5473,8 +5471,8 @@ def _disable_job_internal( # pylint: disable=inconsistent-return-statements job_id: str, disable_options: _models.BatchJobDisableOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -5483,27 +5481,26 @@ def _disable_job_internal( # pylint: disable=inconsistent-return-statements ) -> None: """Disables the specified Job, preventing new Tasks from running. - The Batch Service immediately moves the Job to the disabling state. Batch then - uses the disableTasks parameter to determine what to do with the currently - running Tasks of the Job. The Job remains in the disabling state until the - disable operation is completed and all Tasks have been dealt with according to - the disableTasks option; the Job then moves to the disabled state. No new Tasks - are started under the Job until it moves back to active state. If you try to - disable a Job that is in any state other than active, disabling, or disabled, - the request fails with status code 409. + The Batch Service immediately moves the Job to the disabling state. Batch then uses the + disableTasks parameter to determine what to do with the currently running Tasks of the Job. 
The + Job remains in the disabling state until the disable operation is completed and all Tasks have + been dealt with according to the disableTasks option; the Job then moves to the disabled state. + No new Tasks are started under the Job until it moves back to active state. If you try to + disable a Job that is in any state other than active, disabling, or disabled, the request fails + with status code 409. :param job_id: The ID of the Job to disable. Required. :type job_id: str :param disable_options: The options to use for disabling the Job. Required. :type disable_options: ~azure.batch.models.BatchJobDisableOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5549,8 +5546,8 @@ def _disable_job_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5596,8 +5593,8 @@ def _enable_job_internal( # pylint: disable=inconsistent-return-statements self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -5606,23 +5603,22 @@ def _enable_job_internal( # pylint: disable=inconsistent-return-statements ) -> None: """Enables the specified Job, allowing new Tasks to run. - When you call this API, the Batch service sets a disabled Job to the enabling - state. After the this operation is completed, the Job moves to the active - state, and scheduling of new Tasks under the Job resumes. The Batch service - does not allow a Task to remain in the active state for more than 180 days. - Therefore, if you enable a Job containing active Tasks which were added more - than 180 days ago, those Tasks will not run. + When you call this API, the Batch service sets a disabled Job to the enabling state. After the + this operation is completed, the Job moves to the active state, and scheduling of new Tasks + under the Job resumes. The Batch service does not allow a Task to remain in the active state + for more than 180 days. Therefore, if you enable a Job containing active Tasks which were added + more than 180 days ago, those Tasks will not run. :param job_id: The ID of the Job to enable. Required. 
:type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -5663,8 +5659,8 @@ def _enable_job_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5709,8 +5705,8 @@ def _terminate_job_internal( # pylint: disable=inconsistent-return-statements job_id: str, options: Optional[_models.BatchJobTerminateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -5720,25 +5716,24 @@ def _terminate_job_internal( # pylint: disable=inconsistent-return-statements ) -> None: """Terminates the specified Job, marking it as completed. 
- When a Terminate Job request is received, the Batch service sets the Job to the - terminating state. The Batch service then terminates any running Tasks - associated with the Job and runs any required Job release Tasks. Then the Job - moves into the completed state. If there are any Tasks in the Job in the active - state, they will remain in the active state. Once a Job is terminated, new - Tasks cannot be added and any remaining active Tasks will not be scheduled. + When a Terminate Job request is received, the Batch service sets the Job to the terminating + state. The Batch service then terminates any running Tasks associated with the Job and runs any + required Job release Tasks. Then the Job moves into the completed state. If there are any Tasks + in the Job in the active state, they will remain in the active state. Once a Job is terminated, + new Tasks cannot be added and any remaining active Tasks will not be scheduled. :param job_id: The ID of the Job to terminate. Required. :type job_id: str :param options: The options to use for terminating the Job. Default value is None. :type options: ~azure.batch.models.BatchJobTerminateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -5791,8 +5786,8 @@ def _terminate_job_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_terminate_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -5839,32 +5834,30 @@ def create_job( # pylint: disable=inconsistent-return-statements self, job: _models.BatchJobCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Job to the specified Account. - The Batch service supports two ways to control the work done as part of a Job. - In the first approach, the user specifies a Job Manager Task. The Batch service - launches this Task when it is ready to start the Job. The Job Manager Task - controls all other Tasks that run under this Job, by using the Task APIs. In - the second approach, the user directly controls the execution of Tasks under an - active Job, by using the Task APIs. Also note: when naming Jobs, avoid - including sensitive information such as user names or secret project names. - This information may appear in telemetry logs accessible to Microsoft Support - engineers. + The Batch service supports two ways to control the work done as part of a Job. In the first + approach, the user specifies a Job Manager Task. The Batch service launches this Task when it + is ready to start the Job. The Job Manager Task controls all other Tasks that run under this + Job, by using the Task APIs. 
In the second approach, the user directly controls the execution + of Tasks under an active Job, by using the Task APIs. Also note: when naming Jobs, avoid + including sensitive information such as user names or secret project names. This information + may appear in telemetry logs accessible to Microsoft Support engineers. :param job: The Job to be created. Required. :type job: ~azure.batch.models.BatchJobCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -5888,8 +5881,8 @@ def create_job( # pylint: disable=inconsistent-return-statements _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -5930,8 +5923,8 @@ def create_job( # pylint: disable=inconsistent-return-statements def list_jobs( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -5942,14 +5935,14 @@ def list_jobs( Lists all of the Jobs in the specified Account. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -5983,8 +5976,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_jobs_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -6055,8 +6048,8 @@ def list_jobs_from_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -6070,14 +6063,14 @@ def list_jobs_from_schedule( :param job_schedule_id: The ID of the Job Schedule from which you want to get a list of Jobs. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -6112,8 +6105,8 @@ def prepare_request(next_link=None): _request = build_batch_list_jobs_from_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -6184,33 +6177,32 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> ItemPaged["_models.BatchJobPreparationAndReleaseTaskStatus"]: - """Lists the execution status of the Job Preparation and Job Release Task for the - specified Job across the Compute Nodes where the Job has run. - - This API returns the Job Preparation and Job Release Task status on all Compute - Nodes that have run the Job Preparation or Job Release Task. This includes - Compute Nodes which have since been removed from the Pool. If this API is - invoked on a Job which has no Job Preparation or Job Release Task, the Batch - service returns HTTP status code 409 (Conflict) with an error code of + """Lists the execution status of the Job Preparation and Job Release Task for the specified Job + across the Compute Nodes where the Job has run. + + This API returns the Job Preparation and Job Release Task status on all Compute Nodes that have + run the Job Preparation or Job Release Task. This includes Compute Nodes which have since been + removed from the Pool. If this API is invoked on a Job which has no Job Preparation or Job + Release Task, the Batch service returns HTTP status code 409 (Conflict) with an error code of JobPreparationTaskNotSpecified. :param job_id: The ID of the Job. Required. 
:type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -6244,8 +6236,8 @@ def prepare_request(next_link=None): _request = build_batch_list_job_preparation_and_release_task_status_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -6314,25 +6306,30 @@ def get_next(next_link=None): @distributed_trace def get_job_task_counts( - self, job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + self, + job_id: str, + *, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, + **kwargs: Any ) -> _models.BatchTaskCountsResult: """Gets the Task counts for the specified Job. - Task counts provide a count of the Tasks by active, running or completed Task - state, and a count of Tasks which succeeded or failed. Tasks in the preparing - state are counted as running. Note that the numbers returned may not always be - up to date. If you need exact task counts, use a list query. 
+ Task counts provide a count of the Tasks by active, running or completed Task state, and a + count of Tasks which succeeded or failed. Tasks in the preparing state are counted as running. + Note that the numbers returned may not always be up to date. If you need exact task counts, use + a list query. :param job_id: The ID of the Job. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchTaskCountsResult. 
The BatchTaskCountsResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTaskCountsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -6352,8 +6349,8 @@ def get_job_task_counts( _request = build_batch_get_job_task_counts_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6404,8 +6401,8 @@ def job_schedule_exists( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -6418,14 +6415,14 @@ def job_schedule_exists( :param job_schedule_id: The ID of the Job Schedule which you want to check. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -6466,8 +6463,8 @@ def job_schedule_exists( _request = build_batch_job_schedule_exists_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -6512,8 +6509,8 @@ def _delete_job_schedule_internal( # pylint: disable=inconsistent-return-statem self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -6523,22 +6520,22 @@ def _delete_job_schedule_internal( # pylint: disable=inconsistent-return-statem ) -> None: """Deletes a Job Schedule from the specified Account. - When you delete a Job Schedule, this also deletes all Jobs and Tasks under that - schedule. When Tasks are deleted, all the files in their working directories on - the Compute Nodes are also deleted (the retention period is ignored). The Job - Schedule statistics are no longer accessible once the Job Schedule is deleted, - though they are still counted towards Account lifetime statistics. + When you delete a Job Schedule, this also deletes all Jobs and Tasks under that schedule. When + Tasks are deleted, all the files in their working directories on the Compute Nodes are also + deleted (the retention period is ignored). The Job Schedule statistics are no longer accessible + once the Job Schedule is deleted, though they are still counted towards Account lifetime + statistics. :param job_schedule_id: The ID of the Job Schedule to delete. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -6582,8 +6579,8 @@ def _delete_job_schedule_internal( # pylint: disable=inconsistent-return-statem _request = build_batch_delete_job_schedule_internal_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -6625,8 +6622,8 @@ def get_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -6639,14 +6636,14 @@ def get_job_schedule( :param job_schedule_id: The ID of the Job Schedule to get. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -6691,8 +6688,8 @@ def get_job_schedule( _request = build_batch_get_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -6750,8 +6747,8 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements job_schedule_id: str, job_schedule: _models.BatchJobScheduleUpdateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -6760,24 +6757,23 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements ) -> None: """Updates the properties of the specified Job Schedule. 
- This replaces only the Job Schedule properties specified in the request. For - example, if the schedule property is not specified with this request, then the - Batch service will keep the existing schedule. Changes to a Job Schedule only - impact Jobs created by the schedule after the update has taken place; currently - running Jobs are unaffected. + This replaces only the Job Schedule properties specified in the request. For example, if the + schedule property is not specified with this request, then the Batch service will keep the + existing schedule. Changes to a Job Schedule only impact Jobs created by the schedule after the + update has taken place; currently running Jobs are unaffected. :param job_schedule_id: The ID of the Job Schedule to update. Required. :type job_schedule_id: str :param job_schedule: The options to use for updating the Job Schedule. Required. :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -6823,8 +6819,8 @@ def update_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_update_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -6871,8 +6867,8 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements job_schedule_id: str, job_schedule: _models.BatchJobSchedule, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -6881,24 +6877,23 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements ) -> None: """Updates the properties of the specified Job Schedule. - This fully replaces all the updatable properties of the Job Schedule. For - example, if the schedule property is not specified with this request, then the - Batch service will remove the existing schedule. Changes to a Job Schedule only - impact Jobs created by the schedule after the update has taken place; currently - running Jobs are unaffected. + This fully replaces all the updatable properties of the Job Schedule. For example, if the + schedule property is not specified with this request, then the Batch service will remove the + existing schedule. Changes to a Job Schedule only impact Jobs created by the schedule after the + update has taken place; currently running Jobs are unaffected. :param job_schedule_id: The ID of the Job Schedule to update. Required. :type job_schedule_id: str :param job_schedule: A Job Schedule with updated properties. Required. 
:type job_schedule: ~azure.batch.models.BatchJobSchedule - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -6944,8 +6939,8 @@ def replace_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -6991,8 +6986,8 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -7005,14 +7000,14 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to disable. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -7053,8 +7048,8 @@ def disable_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -7098,8 +7093,8 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -7112,14 +7107,14 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule_id: The ID of the Job Schedule to enable. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -7160,8 +7155,8 @@ def enable_job_schedule( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -7205,8 +7200,8 @@ def _terminate_job_schedule_internal( # pylint: disable=inconsistent-return-sta self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -7220,14 +7215,14 @@ def _terminate_job_schedule_internal( # pylint: disable=inconsistent-return-sta :param job_schedule_id: The ID of the Job Schedule to terminates. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -7272,8 +7267,8 @@ def _terminate_job_schedule_internal( # pylint: disable=inconsistent-return-sta _request = build_batch_terminate_job_schedule_internal_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -7318,8 +7313,8 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements self, job_schedule: _models.BatchJobScheduleCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Job Schedule to the specified Account. @@ -7328,14 +7323,14 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements :param job_schedule: The Job Schedule to be created. Required. 
:type job_schedule: ~azure.batch.models.BatchJobScheduleCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -7359,8 +7354,8 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_schedule_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -7401,8 +7396,8 @@ def create_job_schedule( # pylint: disable=inconsistent-return-statements def list_job_schedules( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -7413,14 +7408,14 @@ def list_job_schedules( Lists all of the Job Schedules in the specified Account. 
- :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -7454,8 +7449,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_job_schedules_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -7527,28 +7522,28 @@ def create_task( # pylint: disable=inconsistent-return-statements job_id: str, task: _models.BatchTaskCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Task to the specified Job. - The maximum lifetime of a Task from addition to completion is 180 days. If a - Task has not completed within 180 days of being added it will be terminated by - the Batch service and left in whatever state it was in at that time. + The maximum lifetime of a Task from addition to completion is 180 days. 
If a Task has not + completed within 180 days of being added it will be terminated by the Batch service and left in + whatever state it was in at that time. :param job_id: The ID of the Job to which the Task is to be created. Required. :type job_id: str :param task: The Task to be created. Required. :type task: ~azure.batch.models.BatchTaskCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -7573,8 +7568,8 @@ def create_task( # pylint: disable=inconsistent-return-statements _request = build_batch_create_task_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -7616,8 +7611,8 @@ def list_tasks( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -7626,20 +7621,19 @@ def list_tasks( ) -> ItemPaged["_models.BatchTask"]: """Lists all of the Tasks that are associated with the specified Job. 
- For multi-instance Tasks, information such as affinityId, executionInfo and - nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - information about subtasks. + For multi-instance Tasks, information such as affinityId, executionInfo and nodeInfo refer to + the primary Task. Use the list subtasks API to retrieve information about subtasks. :param job_id: The ID of the Job. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -7674,8 +7668,8 @@ def prepare_request(next_link=None): _request = build_batch_list_tasks_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -7747,39 +7741,37 @@ def create_task_collection( job_id: str, task_collection: _models.BatchTaskGroup, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchCreateTaskCollectionResult: """Adds a collection of Tasks to the specified Job. - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. + Note that each Task must have a unique ID. The Batch service may not return the results for + each Task in the same order the Tasks were submitted in this request. 
If the server times out + or the connection is closed during the request, the request may have been partially or fully + processed, or not at all. In such cases, the user should re-issue the request. Note that it is + up to the user to correctly handle failures when re-issuing a request. For example, you should + use the same Task IDs during a retry so that if the prior operation succeeded, the retry will + not create extra Tasks unexpectedly. If the response contains any Tasks which failed to add, a + client can retry the request. In a retry, it is most efficient to resubmit only Tasks that + failed to add, and to omit Tasks that were successfully added on the first attempt. The maximum + lifetime of a Task from addition to completion is 180 days. If a Task has not completed within + 180 days of being added it will be terminated by the Batch service and left in whatever state + it was in at that time. :param job_id: The ID of the Job to which the Task collection is to be added. Required. :type job_id: str :param task_collection: The Tasks to be added. Required. :type task_collection: ~azure.batch.models.BatchTaskGroup - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchCreateTaskCollectionResult. 
The BatchCreateTaskCollectionResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchCreateTaskCollectionResult @@ -7805,8 +7797,8 @@ def create_task_collection( _request = build_batch_create_task_collection_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -7860,8 +7852,8 @@ def delete_task( # pylint: disable=inconsistent-return-statements job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -7870,24 +7862,23 @@ def delete_task( # pylint: disable=inconsistent-return-statements ) -> None: """Deletes a Task from the specified Job. - When a Task is deleted, all of the files in its directory on the Compute Node - where it ran are also deleted (regardless of the retention time). For - multi-instance Tasks, the delete Task operation applies synchronously to the - primary task; subtasks and their files are then deleted asynchronously in the - background. + When a Task is deleted, all of the files in its directory on the Compute Node where it ran are + also deleted (regardless of the retention time). For multi-instance Tasks, the delete Task + operation applies synchronously to the primary task; subtasks and their files are then deleted + asynchronously in the background. :param job_id: The ID of the Job from which to delete the Task. Required. :type job_id: str :param task_id: The ID of the Task to delete. Required. 
:type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -7929,8 +7920,8 @@ def delete_task( # pylint: disable=inconsistent-return-statements _request = build_batch_delete_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -7972,8 +7963,8 @@ def get_task( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -7984,22 +7975,21 @@ def get_task( ) -> _models.BatchTask: """Gets information about the specified Task. - For multi-instance Tasks, information such as affinityId, executionInfo and - nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - information about subtasks. 
+ For multi-instance Tasks, information such as affinityId, executionInfo and nodeInfo refer to + the primary Task. Use the list subtasks API to retrieve information about subtasks. :param job_id: The ID of the Job that contains the Task. Required. :type job_id: str :param task_id: The ID of the Task to get information about. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -8045,8 +8035,8 @@ def get_task( _request = build_batch_get_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -8106,8 +8096,8 @@ def replace_task( # pylint: disable=inconsistent-return-statements task_id: str, task: _models.BatchTask, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -8122,14 +8112,14 @@ def replace_task( # pylint: disable=inconsistent-return-statements :type task_id: str :param task: The Task to update. Required. :type task: ~azure.batch.models.BatchTask - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -8176,8 +8166,8 @@ def replace_task( # pylint: disable=inconsistent-return-statements _request = build_batch_replace_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -8224,13 +8214,12 @@ def list_sub_tasks( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> ItemPaged["_models.BatchSubtask"]: - """Lists all of the subtasks that are associated with the specified multi-instance - Task. + """Lists all of the subtasks that are associated with the specified multi-instance Task. If the Task is not a multi-instance Task then this returns an empty collection. @@ -8238,14 +8227,14 @@ def list_sub_tasks( :type job_id: str :param task_id: The ID of the Task. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. 
:paramtype select: list[str] :return: An iterator like instance of BatchSubtask @@ -8271,8 +8260,8 @@ def prepare_request(next_link=None): _request = build_batch_list_sub_tasks_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, select=select, api_version=self._config.api_version, headers=_headers, @@ -8341,8 +8330,8 @@ def terminate_task( # pylint: disable=inconsistent-return-statements job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -8351,22 +8340,22 @@ def terminate_task( # pylint: disable=inconsistent-return-statements ) -> None: """Terminates the specified Task. - When the Task has been terminated, it moves to the completed state. For - multi-instance Tasks, the terminate Task operation applies synchronously to the - primary task; subtasks are then terminated asynchronously in the background. + When the Task has been terminated, it moves to the completed state. For multi-instance Tasks, + the terminate Task operation applies synchronously to the primary task; subtasks are then + terminated asynchronously in the background. :param job_id: The ID of the Job containing the Task. Required. :type job_id: str :param task_id: The ID of the Task to terminate. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -8408,8 +8397,8 @@ def terminate_task( # pylint: disable=inconsistent-return-statements _request = build_batch_terminate_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -8454,37 +8443,35 @@ def reactivate_task( # pylint: disable=inconsistent-return-statements job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - """Reactivates a Task, allowing it to run again even if its retry count has been - exhausted. + """Reactivates a Task, allowing it to run again even if its retry count has been exhausted. - Reactivation makes a Task eligible to be retried again up to its maximum retry - count. The Task's state is changed to active. As the Task is no longer in the - completed state, any previous exit code or failure information is no longer - available after reactivation. Each time a Task is reactivated, its retry count - is reset to 0. 
Reactivation will fail for Tasks that are not completed or that - previously completed successfully (with an exit code of 0). Additionally, it - will fail if the Job has completed (or is terminating or deleting). + Reactivation makes a Task eligible to be retried again up to its maximum retry count. The + Task's state is changed to active. As the Task is no longer in the completed state, any + previous exit code or failure information is no longer available after reactivation. Each time + a Task is reactivated, its retry count is reset to 0. Reactivation will fail for Tasks that are + not completed or that previously completed successfully (with an exit code of 0). Additionally, + it will fail if the Job has completed (or is terminating or deleting). :param job_id: The ID of the Job containing the Task. Required. :type job_id: str :param task_id: The ID of the Task to reactivate. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -8526,8 +8513,8 @@ def reactivate_task( # pylint: disable=inconsistent-return-statements _request = build_batch_reactivate_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -8573,8 +8560,8 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> None: @@ -8588,14 +8575,14 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword recursive: Whether to delete children of a directory. 
If the filePath parameter represents a directory instead of a file, you can set recursive to true to delete the @@ -8623,8 +8610,8 @@ def delete_task_file( # pylint: disable=inconsistent-return-statements job_id=job_id, task_id=task_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, recursive=recursive, api_version=self._config.api_version, headers=_headers, @@ -8664,8 +8651,8 @@ def get_task_file( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -8679,14 +8666,14 @@ def get_task_file( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -8722,8 +8709,8 @@ def get_task_file( job_id=job_id, task_id=task_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, ocp_range=ocp_range, @@ -8784,8 +8771,8 @@ def _get_task_file_properties_internal( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -8798,14 +8785,14 @@ def _get_task_file_properties_internal( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -8837,8 +8824,8 @@ def _get_task_file_properties_internal( job_id=job_id, task_id=task_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, api_version=self._config.api_version, @@ -8888,8 +8875,8 @@ def list_task_files( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, @@ -8903,14 +8890,14 @@ def list_task_files( :type job_id: str :param task_id: The ID of the Task whose files you want to list. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -8946,8 +8933,8 @@ def prepare_request(next_link=None): _request = build_batch_list_task_files_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, recursive=recursive, @@ -9019,14 +9006,17 @@ def create_node_user( # pylint: disable=inconsistent-return-statements node_id: str, user: _models.BatchNodeUserCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Adds a user Account to the specified Compute Node. - You can add a user Account to a Compute Node only when it is in the idle or - running state. + You can add a user Account to a Compute Node only when it is in the idle or running state. + Before you can remotely login to a Compute Node you must configure access ports for SSH and + RDP. For more information, see + `https://learn.microsoft.com/azure/batch/pool-endpoint-configuration + `_. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -9034,14 +9024,14 @@ def create_node_user( # pylint: disable=inconsistent-return-statements :type node_id: str :param user: The options to use for creating the user. Required. :type user: ~azure.batch.models.BatchNodeUserCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9067,8 +9057,8 @@ def create_node_user( # pylint: disable=inconsistent-return-statements _request = build_batch_create_node_user_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -9112,14 +9102,17 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements node_id: str, user_name: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Deletes a user Account from the specified Compute Node. - You can delete a user Account to a Compute Node only when it is in the idle or - running state. + You can delete a user Account to a Compute Node only when it is in the idle or running state. + Before you can remotely login to a Compute Node you must configure access ports for SSH and + RDP. For more information, see + `https://learn.microsoft.com/azure/batch/pool-endpoint-configuration + `_. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -9127,14 +9120,14 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements :type node_id: str :param user_name: The name of the user Account to delete. Required. :type user_name: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9156,8 +9149,8 @@ def delete_node_user( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, user_name=user_name, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -9195,18 +9188,18 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements pool_id: str, node_id: str, user_name: str, - update_options: _models.BatchNodeUserUpdateOptions, + update_options: _models.BatchNodeUserReplaceOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Updates the password and expiration time of a user Account on the specified Compute Node. - This operation replaces of all the updatable properties of the Account. For - example, if the expiryTime element is not specified, the current value is - replaced with the default value, not left unmodified. You can update a user - Account on a Compute Node only when it is in the idle or running state. + This operation replaces of all the updatable properties of the Account. For example, if the + expiryTime element is not specified, the current value is replaced with the default value, not + left unmodified. 
You can update a user Account on a Compute Node only when it is in the idle or + running state. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -9215,15 +9208,15 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements :param user_name: The name of the user Account to update. Required. :type user_name: str :param update_options: The options to use for updating the user. Required. - :type update_options: ~azure.batch.models.BatchNodeUserUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :type update_options: ~azure.batch.models.BatchNodeUserReplaceOptions + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9250,8 +9243,8 @@ def replace_node_user( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, user_name=user_name, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -9294,8 +9287,8 @@ def get_node( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> _models.BatchNode: @@ -9307,14 +9300,14 @@ def get_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to get information about. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] :return: BatchNode. 
The BatchNode is compatible with MutableMapping @@ -9337,8 +9330,8 @@ def get_node( _request = build_batch_get_node_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, select=select, api_version=self._config.api_version, headers=_headers, @@ -9392,8 +9385,8 @@ def _reboot_node_internal( # pylint: disable=inconsistent-return-statements node_id: str, options: Optional[_models.BatchNodeRebootOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Restarts the specified Compute Node. @@ -9406,14 +9399,14 @@ def _reboot_node_internal( # pylint: disable=inconsistent-return-statements :type node_id: str :param options: The options to use for rebooting the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeRebootOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9443,8 +9436,8 @@ def _reboot_node_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_reboot_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -9487,8 +9480,8 @@ def _start_node_internal( # pylint: disable=inconsistent-return-statements pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Starts the specified Compute Node. @@ -9499,14 +9492,14 @@ def _start_node_internal( # pylint: disable=inconsistent-return-statements :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9527,8 +9520,8 @@ def _start_node_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_start_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -9570,15 +9563,15 @@ def _reimage_node_internal( # pylint: disable=inconsistent-return-statements node_id: str, options: Optional[_models.BatchNodeReimageOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Reinstalls the operating system on the specified Compute Node. - You can reinstall the operating system on a Compute Node only if it is in an - idle or running state. This API can be invoked only on Pools created with the - cloud service configuration property. + You can reinstall the operating system on a Compute Node only if it is in an idle or running + state. This API can be invoked only on Pools created with the cloud service configuration + property. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -9586,14 +9579,14 @@ def _reimage_node_internal( # pylint: disable=inconsistent-return-statements :type node_id: str :param options: The options to use for reimaging the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeReimageOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9623,8 +9616,8 @@ def _reimage_node_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_reimage_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -9668,8 +9661,8 @@ def _deallocate_node_internal( # pylint: disable=inconsistent-return-statements node_id: str, options: Optional[_models.BatchNodeDeallocateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Deallocates the specified Compute Node. @@ -9682,14 +9675,14 @@ def _deallocate_node_internal( # pylint: disable=inconsistent-return-statements :type node_id: str :param options: The options to use for deallocating the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeDeallocateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
- :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9719,8 +9712,8 @@ def _deallocate_node_internal( # pylint: disable=inconsistent-return-statements _request = build_batch_deallocate_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -9764,14 +9757,14 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements node_id: str, options: Optional[_models.BatchNodeDisableSchedulingOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Disables Task scheduling on the specified Compute Node. - You can disable Task scheduling on a Compute Node only if its current - scheduling state is enabled. + You can disable Task scheduling on a Compute Node only if its current scheduling state is + enabled. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -9781,14 +9774,14 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements :param options: The options to use for disabling scheduling on the Compute Node. Default value is None. 
:type options: ~azure.batch.models.BatchNodeDisableSchedulingOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9818,8 +9811,8 @@ def disable_node_scheduling( # pylint: disable=inconsistent-return-statements _request = build_batch_disable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -9862,28 +9855,28 @@ def enable_node_scheduling( # pylint: disable=inconsistent-return-statements pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Enables Task scheduling on the specified Compute Node. - You can enable Task scheduling on a Compute Node only if its current scheduling - state is disabled. + You can enable Task scheduling on a Compute Node only if its current scheduling state is + disabled. :param pool_id: The ID of the Pool that contains the Compute Node. Required. 
:type pool_id: str :param node_id: The ID of the Compute Node on which you want to enable Task scheduling. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -9904,8 +9897,8 @@ def enable_node_scheduling( # pylint: disable=inconsistent-return-statements _request = build_batch_enable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -9946,28 +9939,30 @@ def get_node_remote_login_settings( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchNodeRemoteLoginSettings: """Gets the settings required for remote login to a Compute Node. - Before you can remotely login to a Compute Node using the remote login settings, - you must create a user Account on the Compute Node. 
+ Before you can remotely login to a Compute Node using the remote login settings, you must + create a user Account on the Compute Node and configure access ports for SSH and RDP. For more + information, see `https://learn.microsoft.com/azure/batch/pool-endpoint-configuration + `_. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str :param node_id: The ID of the Compute Node for which to obtain the remote login settings. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchNodeRemoteLoginSettings. 
The BatchNodeRemoteLoginSettings is compatible with MutableMapping :rtype: ~azure.batch.models.BatchNodeRemoteLoginSettings @@ -9989,8 +9984,8 @@ def get_node_remote_login_settings( _request = build_batch_get_node_remote_login_settings_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -10043,17 +10038,16 @@ def upload_node_logs( node_id: str, upload_options: _models.UploadBatchServiceLogsOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.UploadBatchServiceLogsResult: - """Upload Azure Batch service log files from the specified Compute Node to Azure - Blob Storage. + """Upload Azure Batch service log files from the specified Compute Node to Azure Blob Storage. - This is for gathering Azure Batch service log files in an automated fashion - from Compute Nodes if you are experiencing an error and wish to escalate to - Azure support. The Azure Batch service log files should be shared with Azure - support to aid in debugging issues with the Batch service. + This is for gathering Azure Batch service log files in an automated fashion from Compute Nodes + if you are experiencing an error and wish to escalate to Azure support. The Azure Batch service + log files should be shared with Azure support to aid in debugging issues with the Batch + service. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -10062,14 +10056,14 @@ def upload_node_logs( :type node_id: str :param upload_options: The Azure Batch service log files upload options. Required. 
:type upload_options: ~azure.batch.models.UploadBatchServiceLogsOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: UploadBatchServiceLogsResult. The UploadBatchServiceLogsResult is compatible with MutableMapping :rtype: ~azure.batch.models.UploadBatchServiceLogsResult @@ -10096,8 +10090,8 @@ def upload_node_logs( _request = build_batch_upload_node_logs_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -10150,8 +10144,8 @@ def list_nodes( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -10163,14 +10157,14 @@ def list_nodes( :param pool_id: The ID of the Pool from which you want to list Compute Nodes. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -10203,8 +10197,8 @@ def prepare_request(next_link=None): _request = build_batch_list_nodes_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -10276,8 +10270,8 @@ def get_node_extension( node_id: str, extension_name: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> _models.BatchNodeVMExtension: @@ -10292,14 +10286,14 @@ def get_node_extension( :param extension_name: The name of the Compute Node Extension that you want to get information about. Required. :type extension_name: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] :return: BatchNodeVMExtension. The BatchNodeVMExtension is compatible with MutableMapping @@ -10323,8 +10317,8 @@ def get_node_extension( pool_id=pool_id, node_id=node_id, extension_name=extension_name, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, select=select, api_version=self._config.api_version, headers=_headers, @@ -10377,8 +10371,8 @@ def list_node_extensions( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, select: Optional[list[str]] = None, **kwargs: Any @@ -10391,14 +10385,14 @@ def list_node_extensions( :type pool_id: str :param node_id: The ID of the Compute Node that you want to list extensions. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. 
Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -10427,8 +10421,8 @@ def prepare_request(next_link=None): _request = build_batch_list_node_extensions_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, select=select, api_version=self._config.api_version, @@ -10499,8 +10493,8 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> None: @@ -10514,14 +10508,14 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword recursive: Whether to delete children of a directory. 
If the filePath parameter represents a directory instead of a file, you can set recursive to true to delete the @@ -10549,8 +10543,8 @@ def delete_node_file( # pylint: disable=inconsistent-return-statements pool_id=pool_id, node_id=node_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, recursive=recursive, api_version=self._config.api_version, headers=_headers, @@ -10590,8 +10584,8 @@ def get_node_file( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -10605,14 +10599,14 @@ def get_node_file( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -10648,8 +10642,8 @@ def get_node_file( pool_id=pool_id, node_id=node_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, ocp_range=ocp_range, @@ -10710,8 +10704,8 @@ def _get_node_file_properties_internal( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -10724,14 +10718,14 @@ def _get_node_file_properties_internal( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -10763,8 +10757,8 @@ def _get_node_file_properties_internal( pool_id=pool_id, node_id=node_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, api_version=self._config.api_version, @@ -10814,8 +10808,8 @@ def list_node_files( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, @@ -10829,14 +10823,14 @@ def list_node_files( :type pool_id: str :param node_id: The ID of the Compute Node whose files you want to list. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -10870,8 +10864,8 @@ def prepare_request(next_link=None): _request = build_batch_list_node_files_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, recursive=recursive, diff --git a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py index 09fdc1edec16..2dc2e3bc8824 100644 --- a/sdk/batch/azure-batch/azure/batch/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/_operations/_patch.py @@ -62,8 +62,8 @@ def begin_delete_job( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -85,14 +85,14 @@ def begin_delete_job( :param job_id: The ID of the Job to delete. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + delete the Job, in seconds. The default is 30 seconds. If the value is larger than 30, the + default will be used instead. Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time. Default value is None. @@ -123,8 +123,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, self._delete_job_internal( job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -144,8 +144,8 @@ def begin_disable_job( job_id: str, disable_options: _models.BatchJobDisableOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -168,14 +168,14 @@ def begin_disable_job( :type job_id: str :param disable_options: The options to use for disabling the Job. Required. :type disable_options: ~azure.batch.models.BatchJobDisableOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + disable the Job, in seconds. The default is 30 seconds. If the value is larger than 30, the + default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -206,8 +206,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header self._disable_job_internal( job_id, disable_options=disable_options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -225,8 +225,8 @@ def begin_enable_job( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -244,14 +244,14 @@ def begin_enable_job( :param job_id: The ID of the Job to enable. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + enable the Job, in seconds. The default is 30 seconds. If the value is larger than 30, the + default will be used instead. Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -281,8 +281,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, self._enable_job_internal( job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -300,8 +300,8 @@ def begin_delete_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -320,14 +320,14 @@ def begin_delete_job_schedule( :param job_schedule_id: The ID of the Job Schedule to delete. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + delete the Job Schedule, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead. Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time. Default value is None. @@ -355,8 +355,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, self._delete_job_schedule_internal( job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -376,8 +376,8 @@ def begin_delete_pool( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -402,14 +402,14 @@ def begin_delete_pool( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to delete the Pool, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. 
+ :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -439,8 +439,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, self._delete_pool_internal( pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -460,8 +460,8 @@ def begin_deallocate_node( node_id: str, options: Optional[_models.BatchNodeDeallocateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> LROPoller[None]: @@ -475,14 +475,14 @@ def begin_deallocate_node( :type node_id: str :param options: The options to use for deallocating the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeDeallocateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + deallocate the Compute Node, in seconds. The default is 30 seconds. 
If the value is larger + than 30, the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int :return: None @@ -499,8 +499,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header pool_id, node_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -516,8 +516,8 @@ def begin_reboot_node( node_id: str, options: Optional[_models.BatchNodeRebootOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> LROPoller[None]: @@ -531,14 +531,14 @@ def begin_reboot_node( :type node_id: str :param options: The options to use for rebooting the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeRebootOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + reboot the Compute Node, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". 
Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int :return: None @@ -555,8 +555,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header pool_id, node_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -572,8 +572,8 @@ def begin_reimage_node( node_id: str, options: Optional[_models.BatchNodeReimageOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> LROPoller[None]: @@ -591,14 +591,14 @@ def begin_reimage_node( :type node_id: str :param options: The options to use for reimaging the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeReimageOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + reimage the Compute Node, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. 
+ :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int :return: None @@ -615,8 +615,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header pool_id, node_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -631,8 +631,8 @@ def begin_remove_nodes( pool_id: str, remove_options: _models.BatchNodeRemoveOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -650,14 +650,14 @@ def begin_remove_nodes( :type pool_id: str :param remove_options: The options to use for removing the node. Required. :type remove_options: ~azure.batch.models.BatchNodeRemoveOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + remove the Compute Node, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. 
+ :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -688,8 +688,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header self._remove_nodes_internal( pool_id, remove_options=remove_options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -708,8 +708,8 @@ def begin_resize_pool( pool_id: str, resize_options: _models.BatchPoolResizeOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -731,14 +731,14 @@ def begin_resize_pool( :type pool_id: str :param resize_options: The options to use for resizing the pool. Required. :type resize_options: ~azure.batch.models.BatchPoolResizeOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to resize the Pool, in seconds. The default is 30 seconds. 
If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -769,8 +769,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header self._resize_pool_internal( pool_id, resize_options=resize_options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -789,8 +789,8 @@ def begin_start_node( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> LROPoller[None]: @@ -802,14 +802,14 @@ def begin_start_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + start the Compute Node, in seconds. The default is 30 seconds. 
If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int :return: None @@ -825,8 +825,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header self._start_node_internal( pool_id, node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -840,8 +840,8 @@ def begin_stop_pool_resize( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -861,14 +861,14 @@ def begin_stop_pool_resize( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to stop the Pool resize, in seconds. The default is 30 seconds. If the value is larger than + 30, the default will be used instead.". Default value is None. 
+ :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -898,8 +898,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, self._stop_pool_resize_internal( pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -918,8 +918,8 @@ def begin_terminate_job( job_id: str, options: Optional[_models.BatchJobTerminateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -941,14 +941,14 @@ def begin_terminate_job( :type job_id: str :param options: The options to use for terminating the Job. Default value is None. :type options: ~azure.batch.models.BatchJobTerminateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to terminate the Job, in seconds. The default is 30 seconds. 
If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -982,8 +982,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header self._terminate_job_internal( job_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -1002,8 +1002,8 @@ def begin_terminate_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -1016,14 +1016,14 @@ def begin_terminate_job_schedule( :param job_schedule_id: The ID of the Job Schedule to terminates. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + terminate the Job Schedule, in seconds. 
The default is 30 seconds. If the value is larger than + 30, the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1057,8 +1057,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, self._terminate_job_schedule_internal( job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -1080,10 +1080,10 @@ def create_tasks( self, job_id: str, task_collection: List[_models.BatchTaskCreateOptions], - concurrencies: int = 0, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + max_concurrency: int = 0, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchCreateTaskCollectionResult: """Adds a collection of Tasks to the specified Job. @@ -1107,35 +1107,36 @@ def create_tasks( :type job_id: str :param task_collection: The Tasks to be added. Required. :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrencies: number of threads to use in parallel when adding tasks. If specified + :keyword max_concurrency: number of threads to use in parallel when adding tasks. If specified and greater than 0, will start additional threads to submit requests and wait for them to finish. 
Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrencies: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype max_concurrency: int + :keyword service_timeout: The maximum time that the server can spend processing the request to + create the task collection, in seconds. The default is 30 seconds. If the value is larger than + 30, the default will be used instead. Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchTaskAddCollectionResult. 
The BatchTaskAddCollectionResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTaskAddCollectionResult :raises ~azure.batch.custom.CreateTasksError: """ - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) + kwargs.update({"service_timeout": service_timeout, "ocp_date": ocp_date}) # deque operations(append/pop) are thread-safe results_queue: Deque[_models.BatchTaskCreateResult] = collections.deque() task_workflow_manager = _TaskWorkflowManager(self, job_id=job_id, task_collection=task_collection, **kwargs) # multi-threaded behavior - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") + if max_concurrency: + if max_concurrency < 0: + raise ValueError("max_concurrency must be positive or 0") active_threads = [] - for _ in range(concurrencies): + for _ in range(max_concurrency): active_threads.append( threading.Thread( target=task_workflow_manager.task_collection_thread_handler, @@ -1157,7 +1158,7 @@ def create_tasks( task_workflow_manager.errors, ) submitted_tasks = _handle_output(results_queue) - return _models.BatchCreateTaskCollectionResult(values_property=submitted_tasks) + return _models.BatchCreateTaskCollectionResult(result_values=submitted_tasks) @distributed_trace def get_node_file( @@ -1166,8 +1167,8 @@ def get_node_file( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -1181,13 +1182,13 @@ def get_node_file( :type node_id: str :param file_path: The path to the file or directory that you want to delete. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. 
A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1209,8 +1210,8 @@ def get_node_file( args = [pool_id, node_id, file_path] kwargs.update( { - "timeout": timeout, - "ocpdate": ocpdate, + "service_timeout": service_timeout, + "ocp_date": ocp_date, "if_modified_since": if_modified_since, "if_unmodified_since": if_unmodified_since, "ocp_range": ocp_range, @@ -1226,8 +1227,8 @@ def get_node_file_properties( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -1240,13 +1241,13 @@ def get_node_file_properties( :type node_id: str :param file_path: The path to the file or directory that you want to delete. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1277,8 +1278,8 @@ def cls(_pipeline_response, _json_response, headers): pool_id, node_id, file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, cls=cls, @@ -1294,8 +1295,8 @@ def get_task_file_properties( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -1308,13 +1309,13 @@ def get_task_file_properties( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. 
Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1345,8 +1346,8 @@ def cls(_pipeline_response, _json_response, headers): job_id, task_id, file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, cls=cls, @@ -1362,8 +1363,8 @@ def get_task_file( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -1377,13 +1378,13 @@ def get_task_file( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -1406,8 +1407,8 @@ def get_task_file( args = [job_id, task_id, file_path] kwargs.update( { - "timeout": timeout, - "ocpdate": ocpdate, + "service_timeout": service_timeout, + "ocp_date": ocp_date, "if_modified_since": if_modified_since, "if_unmodified_since": if_unmodified_since, "ocp_range": ocp_range, @@ -1429,16 +1430,14 @@ def patch_sdk(): class _TaskWorkflowManager: """Worker class for one create_task_collection request - :param ~TaskOperations task_operations: Parent object which instantiated this + :param batch_client: Parent object which instantiated this + :type batch_client: ~_BatchClientOperationsMixin :param str job_id: The ID of the job to which the task collection is to be added. - :param tasks_to_add: The collection of tasks to add. - :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_create_task_collection_options: Additional parameters for the - operation - :type task_create_task_collection_options: :class:`BatchTaskAddCollectionResult - ` + :param task_collection: The collection of tasks to add. 
+ :type task_collection: Iterable[~azure.batch.models.BatchTaskCreateOptions] + :param kwargs: Additional parameters for the operation + :type kwargs: dict """ def __init__( @@ -1487,7 +1486,7 @@ def _bulk_add_tasks( create_task_collection_response: _models.BatchCreateTaskCollectionResult = ( self._batch_client.create_task_collection( job_id=self._job_id, - task_collection=_models.BatchTaskGroup(values_property=chunk_tasks_to_add), + task_collection=_models.BatchTaskGroup(task_values=chunk_tasks_to_add), **self._kwargs, ) ) @@ -1544,8 +1543,8 @@ def _bulk_add_tasks( # Unknown State - don't know if tasks failed to add or were successful self.errors.appendleft(e) else: - if create_task_collection_response.values_property: - for task_result in create_task_collection_response.values_property: # pylint: disable=no-member + if create_task_collection_response.result_values: + for task_result in create_task_collection_response.result_values: # pylint: disable=no-member if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: # Server error will be retried with self._pending_queue_lock: diff --git a/sdk/batch/azure-batch/azure/batch/_utils/model_base.py b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py index 5e1a2d0fa5ec..c402af2afc63 100644 --- a/sdk/batch/azure-batch/azure/batch/_utils/model_base.py +++ b/sdk/batch/azure-batch/azure/batch/_utils/model_base.py @@ -688,10 +688,6 @@ def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: if not rf._rest_name_input: rf._rest_name_input = attr cls._attr_to_rest_field: dict[str, _RestField] = dict(attr_to_rest_field.items()) - cls._backcompat_attr_to_rest_field: dict[str, _RestField] = { - Model._get_backcompat_attribute_name(cls._attr_to_rest_field, attr): rf - for attr, rf in cls._attr_to_rest_field.items() - } cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") return super().__new__(cls) @@ -701,16 +697,6 @@ def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: if 
hasattr(base, "__mapping__"): base.__mapping__[discriminator or cls.__name__] = cls # type: ignore - @classmethod - def _get_backcompat_attribute_name(cls, attr_to_rest_field: dict[str, "_RestField"], attr_name: str) -> str: - rest_field_obj = attr_to_rest_field.get(attr_name) # pylint: disable=protected-access - if rest_field_obj is None: - return attr_name - original_tsp_name = getattr(rest_field_obj, "_original_tsp_name", None) # pylint: disable=protected-access - if original_tsp_name: - return original_tsp_name - return attr_name - @classmethod def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: for v in cls.__dict__.values(): @@ -1037,7 +1023,7 @@ def _failsafe_deserialize( ) -> typing.Any: try: return _deserialize(deserializer, response.json(), module, rf, format) - except DeserializationError: + except Exception: # pylint: disable=broad-except _LOGGER.warning( "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True ) @@ -1050,14 +1036,13 @@ def _failsafe_deserialize_xml( ) -> typing.Any: try: return _deserialize_xml(deserializer, response.text()) - except DeserializationError: + except Exception: # pylint: disable=broad-except _LOGGER.warning( "Ran into a deserialization error. 
Ignoring since this is failsafe deserialization", exc_info=True ) return None -# pylint: disable=too-many-instance-attributes class _RestField: def __init__( self, @@ -1070,7 +1055,6 @@ def __init__( format: typing.Optional[str] = None, is_multipart_file_input: bool = False, xml: typing.Optional[dict[str, typing.Any]] = None, - original_tsp_name: typing.Optional[str] = None, ): self._type = type self._rest_name_input = name @@ -1082,7 +1066,6 @@ def __init__( self._format = format self._is_multipart_file_input = is_multipart_file_input self._xml = xml if xml is not None else {} - self._original_tsp_name = original_tsp_name @property def _class_type(self) -> typing.Any: @@ -1161,7 +1144,6 @@ def rest_field( format: typing.Optional[str] = None, is_multipart_file_input: bool = False, xml: typing.Optional[dict[str, typing.Any]] = None, - original_tsp_name: typing.Optional[str] = None, ) -> typing.Any: return _RestField( name=name, @@ -1171,7 +1153,6 @@ def rest_field( format=format, is_multipart_file_input=is_multipart_file_input, xml=xml, - original_tsp_name=original_tsp_name, ) diff --git a/sdk/batch/azure-batch/azure/batch/_version.py b/sdk/batch/azure-batch/azure/batch/_version.py index 9040f382ff9c..76bba1459abc 100644 --- a/sdk/batch/azure-batch/azure/batch/_version.py +++ b/sdk/batch/azure-batch/azure/batch/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "15.1.0b3" +VERSION = "15.1.0" diff --git a/sdk/batch/azure-batch/azure/batch/aio/_client.py b/sdk/batch/azure-batch/azure/batch/aio/_client.py index 1305760b30eb..b5791f0f9d96 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_client.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_client.py @@ -31,8 +31,9 @@ class BatchClient(_BatchClientOperationsMixin): :type endpoint: str :param credential: Credential used to authenticate requests to the service. Required. 
:type credential: ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2025-06-01". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2025-06-01" + and None. Default value is "2025-06-01". Note that overriding this default value may result in + unsupported behavior. :paramtype api_version: str """ diff --git a/sdk/batch/azure-batch/azure/batch/aio/_configuration.py b/sdk/batch/azure-batch/azure/batch/aio/_configuration.py index 1e6e3f2a0935..f6ae6c8740a2 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_configuration.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_configuration.py @@ -28,8 +28,9 @@ class BatchClientConfiguration: # pylint: disable=too-many-instance-attributes :type endpoint: str :param credential: Credential used to authenticate requests to the service. Required. :type credential: ~azure.core.credentials_async.AsyncTokenCredential - :keyword api_version: The API version to use for this operation. Default value is "2025-06-01". - Note that overriding this default value may result in unsupported behavior. + :keyword api_version: The API version to use for this operation. Known values are "2025-06-01" + and None. Default value is "2025-06-01". Note that overriding this default value may result in + unsupported behavior. 
:paramtype api_version: str """ diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py index 33fadd17e01d..fae1a3a281a9 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_operations.py @@ -122,27 +122,26 @@ class _BatchClientOperationsMixin( # pylint: disable=too-many-public-methods def list_applications( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, **kwargs: Any ) -> AsyncItemPaged["_models.BatchApplication"]: """Lists all of the applications available in the specified Account. - This operation returns only Applications and versions that are available for - use on Compute Nodes; that is, that can be used in an Package reference. For - administrator information about applications and versions that are not yet - available to Compute Nodes, use the Azure portal or the Azure Resource Manager - API. + This operation returns only Applications and versions that are available for use on Compute + Nodes; that is, that can be used in an Package reference. For administrator information about + applications and versions that are not yet available to Compute Nodes, use the Azure portal or + the Azure Resource Manager API. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -167,8 +166,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_applications_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, api_version=self._config.api_version, headers=_headers, @@ -236,28 +235,27 @@ async def get_application( self, application_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchApplication: """Gets information about the specified Application. - This operation returns only Applications and versions that are available for - use on Compute Nodes; that is, that can be used in an Package reference. For - administrator information about Applications and versions that are not yet - available to Compute Nodes, use the Azure portal or the Azure Resource Manager - API. + This operation returns only Applications and versions that are available for use on Compute + Nodes; that is, that can be used in an Package reference. For administrator information about + Applications and versions that are not yet available to Compute Nodes, use the Azure portal or + the Azure Resource Manager API. :param application_id: The ID of the Application. Required. :type application_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchApplication. The BatchApplication is compatible with MutableMapping :rtype: ~azure.batch.models.BatchApplication :raises ~azure.core.exceptions.HttpResponseError: @@ -277,8 +275,8 @@ async def get_application( _request = build_batch_get_application_request( application_id=application_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -328,45 +326,44 @@ async def get_application( def list_pool_usage_metrics( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, - starttime: Optional[datetime.datetime] = None, - endtime: Optional[datetime.datetime] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, filter: Optional[str] = None, **kwargs: Any ) -> AsyncItemPaged["_models.BatchPoolUsageMetrics"]: - """Lists the usage metrics, aggregated by Pool across individual time intervals, - for the specified Account. + """Lists the usage metrics, aggregated by Pool across individual time intervals, for the specified + Account. 
- If you do not specify a $filter clause including a poolId, the response - includes all Pools that existed in the Account in the time range of the - returned aggregation intervals. If you do not specify a $filter clause - including a startTime or endTime these filters default to the start and end - times of the last aggregation interval currently available; that is, only the - last aggregation interval is returned. + If you do not specify a $filter clause including a poolId, the response includes all Pools that + existed in the Account in the time range of the returned aggregation intervals. If you do not + specify a $filter clause including a startTime or endTime these filters default to the start + and end times of the last aggregation interval currently available; that is, only the last + aggregation interval is returned. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int - :keyword starttime: The earliest time from which to include metrics. This must be at least two + :keyword start_time: The earliest time from which to include metrics. 
This must be at least two and a half hours before the current time. If not specified this defaults to the start time of the last aggregation interval currently available. Default value is None. - :paramtype starttime: ~datetime.datetime - :keyword endtime: The latest time from which to include metrics. This must be at least two + :paramtype start_time: ~datetime.datetime + :keyword end_time: The latest time from which to include metrics. This must be at least two hours before the current time. If not specified this defaults to the end time of the last aggregation interval currently available. Default value is None. - :paramtype endtime: ~datetime.datetime + :paramtype end_time: ~datetime.datetime :keyword filter: An OData $filter clause. For more information on constructing this filter, see `https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-account-usage-metrics `_. @@ -393,11 +390,11 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_usage_metrics_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, - starttime=starttime, - endtime=endtime, + start_time=start_time, + end_time=end_time, filter=filter, api_version=self._config.api_version, headers=_headers, @@ -465,26 +462,25 @@ async def create_pool( self, pool: _models.BatchPoolCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Pool to the specified Account. - When naming Pools, avoid including sensitive information such as user names or - secret project names. This information may appear in telemetry logs accessible - to Microsoft Support engineers. + When naming Pools, avoid including sensitive information such as user names or secret project + names. 
This information may appear in telemetry logs accessible to Microsoft Support engineers. :param pool: The Pool to be created. Required. :type pool: ~azure.batch.models.BatchPoolCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -508,8 +504,8 @@ async def create_pool( _content = json.dumps(pool, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_pool_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -550,8 +546,8 @@ async def create_pool( def list_pools( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -562,14 +558,14 @@ def list_pools( Lists all of the Pools in the specified Account. 
- :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -603,8 +599,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pools_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -675,8 +671,8 @@ async def _delete_pool_internal( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -685,29 +681,27 @@ async def _delete_pool_internal( ) -> None: """Deletes a Pool from the specified Account. 
- When you request that a Pool be deleted, the following actions occur: the Pool - state is set to deleting; any ongoing resize operation on the Pool are stopped; - the Batch service starts resizing the Pool to zero Compute Nodes; any Tasks - running on existing Compute Nodes are terminated and requeued (as if a resize - Pool operation had been requested with the default requeue option); finally, - the Pool is removed from the system. Because running Tasks are requeued, the - user can rerun these Tasks by updating their Job to target a different Pool. - The Tasks can then run on the new Pool. If you want to override the requeue - behavior, then you should call resize Pool explicitly to shrink the Pool to - zero size before deleting the Pool. If you call an Update, Patch or Delete API - on a Pool in the deleting state, it will fail with HTTP status code 409 with - error code PoolBeingDeleted. + When you request that a Pool be deleted, the following actions occur: the Pool state is set to + deleting; any ongoing resize operation on the Pool are stopped; the Batch service starts + resizing the Pool to zero Compute Nodes; any Tasks running on existing Compute Nodes are + terminated and requeued (as if a resize Pool operation had been requested with the default + requeue option); finally, the Pool is removed from the system. Because running Tasks are + requeued, the user can rerun these Tasks by updating their Job to target a different Pool. The + Tasks can then run on the new Pool. If you want to override the requeue behavior, then you + should call resize Pool explicitly to shrink the Pool to zero size before deleting the Pool. If + you call an Update, Patch or Delete API on a Pool in the deleting state, it will fail with HTTP + status code 409 with error code PoolBeingDeleted. :param pool_id: The ID of the Pool to get. Required. 
:type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -748,8 +742,8 @@ async def _delete_pool_internal( _request = build_batch_delete_pool_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -790,8 +784,8 @@ async def pool_exists( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -802,14 +796,14 @@ async def pool_exists( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -850,8 +844,8 @@ async def pool_exists( _request = build_batch_pool_exists_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -896,8 +890,8 @@ async def get_pool( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -910,14 +904,14 @@ async def get_pool( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -962,8 +956,8 @@ async def get_pool( _request = build_batch_get_pool_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -1021,8 +1015,8 @@ async def update_pool( pool_id: str, pool: _models.BatchPoolUpdateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1031,22 +1025,22 @@ async def update_pool( ) -> None: """Updates the properties of the specified Pool. - This only replaces the Pool properties specified in the request. For example, - if the Pool has a StartTask associated with it, and a request does not specify - a StartTask element, then the Pool keeps the existing StartTask. + This only replaces the Pool properties specified in the request. For example, if the Pool has a + StartTask associated with it, and a request does not specify a StartTask element, then the Pool + keeps the existing StartTask. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param pool: The pool properties to update. Required. 
:type pool: ~azure.batch.models.BatchPoolUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1092,8 +1086,8 @@ async def update_pool( _request = build_batch_update_pool_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -1136,7 +1130,12 @@ async def update_pool( @distributed_trace_async async def disable_pool_auto_scale( - self, pool_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + self, + pool_id: str, + *, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, + **kwargs: Any ) -> None: """Disables automatic scaling for a Pool. @@ -1144,14 +1143,14 @@ async def disable_pool_auto_scale( :param pool_id: The ID of the Pool on which to disable automatic scaling. Required. 
:type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -1171,8 +1170,8 @@ async def disable_pool_auto_scale( _request = build_batch_disable_pool_auto_scale_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1213,8 +1212,8 @@ async def enable_pool_auto_scale( pool_id: str, enable_auto_scale_options: _models.BatchPoolEnableAutoScaleOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1223,25 +1222,24 @@ async def enable_pool_auto_scale( ) -> None: """Enables automatic scaling for a Pool. - You cannot enable automatic scaling on a Pool if a resize operation is in - progress on the Pool. If automatic scaling of the Pool is currently disabled, - you must specify a valid autoscale formula as part of the request. 
If automatic - scaling of the Pool is already enabled, you may specify a new autoscale formula - and/or a new evaluation interval. You cannot call this API for the same Pool - more than once every 30 seconds. + You cannot enable automatic scaling on a Pool if a resize operation is in progress on the Pool. + If automatic scaling of the Pool is currently disabled, you must specify a valid autoscale + formula as part of the request. If automatic scaling of the Pool is already enabled, you may + specify a new autoscale formula and/or a new evaluation interval. You cannot call this API for + the same Pool more than once every 30 seconds. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param enable_auto_scale_options: The options to use for enabling automatic scaling. Required. :type enable_auto_scale_options: ~azure.batch.models.BatchPoolEnableAutoScaleOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -1287,8 +1285,8 @@ async def enable_pool_auto_scale( _request = build_batch_enable_pool_auto_scale_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -1335,15 +1333,15 @@ async def evaluate_pool_auto_scale( pool_id: str, evaluate_auto_scale_options: _models.BatchPoolEvaluateAutoScaleOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.AutoScaleRun: """Gets the result of evaluating an automatic scaling formula on the Pool. - This API is primarily for validating an autoscale formula, as it simply returns - the result without applying the formula to the Pool. The Pool must have auto - scaling enabled in order to evaluate a formula. + This API is primarily for validating an autoscale formula, as it simply returns the result + without applying the formula to the Pool. The Pool must have auto scaling enabled in order to + evaluate a formula. :param pool_id: The ID of the Pool on which to evaluate the automatic scaling formula. Required. @@ -1351,14 +1349,14 @@ async def evaluate_pool_auto_scale( :param evaluate_auto_scale_options: The options to use for evaluating the automatic scaling formula. Required. :type evaluate_auto_scale_options: ~azure.batch.models.BatchPoolEvaluateAutoScaleOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: AutoScaleRun. The AutoScaleRun is compatible with MutableMapping :rtype: ~azure.batch.models.AutoScaleRun :raises ~azure.core.exceptions.HttpResponseError: @@ -1383,8 +1381,8 @@ async def evaluate_pool_auto_scale( _request = build_batch_evaluate_pool_auto_scale_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1439,8 +1437,8 @@ async def _resize_pool_internal( pool_id: str, resize_options: _models.BatchPoolResizeOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1449,26 +1447,25 @@ async def _resize_pool_internal( ) -> None: """Changes the number of Compute Nodes that are assigned to a Pool. - You can only resize a Pool when its allocation state is steady. If the Pool is - already resizing, the request fails with status code 409. When you resize a - Pool, the Pool's allocation state changes from steady to resizing. You cannot - resize Pools which are configured for automatic scaling. If you try to do this, - the Batch service returns an error 409. If you resize a Pool downwards, the - Batch service chooses which Compute Nodes to remove. To remove specific Compute - Nodes, use the Pool remove Compute Nodes API instead. 
+ You can only resize a Pool when its allocation state is steady. If the Pool is already + resizing, the request fails with status code 409. When you resize a Pool, the Pool's allocation + state changes from steady to resizing. You cannot resize Pools which are configured for + automatic scaling. If you try to do this, the Batch service returns an error 409. If you resize + a Pool downwards, the Batch service chooses which Compute Nodes to remove. To remove specific + Compute Nodes, use the Pool remove Compute Nodes API instead. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param resize_options: The options to use for resizing the pool. Required. :type resize_options: ~azure.batch.models.BatchPoolResizeOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -1514,8 +1511,8 @@ async def _resize_pool_internal( _request = build_batch_resize_pool_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -1561,8 +1558,8 @@ async def _stop_pool_resize_internal( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1571,24 +1568,23 @@ async def _stop_pool_resize_internal( ) -> None: """Stops an ongoing resize operation on the Pool. - This does not restore the Pool to its previous state before the resize - operation: it only stops any further changes being made, and the Pool maintains - its current state. After stopping, the Pool stabilizes at the number of Compute - Nodes it was at when the stop operation was done. During the stop operation, - the Pool allocation state changes first to stopping and then to steady. A - resize operation need not be an explicit resize Pool request; this API can also - be used to halt the initial sizing of the Pool when it is created. + This does not restore the Pool to its previous state before the resize operation: it only stops + any further changes being made, and the Pool maintains its current state. After stopping, the + Pool stabilizes at the number of Compute Nodes it was at when the stop operation was done. + During the stop operation, the Pool allocation state changes first to stopping and then to + steady. A resize operation need not be an explicit resize Pool request; this API can also be + used to halt the initial sizing of the Pool when it is created. 
:param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1629,8 +1625,8 @@ async def _stop_pool_resize_internal( _request = build_batch_stop_pool_resize_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -1675,28 +1671,28 @@ async def replace_pool_properties( pool_id: str, pool: _models.BatchPoolReplaceOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Updates the properties of the specified Pool. - This fully replaces all the updatable properties of the Pool. For example, if - the Pool has a StartTask associated with it and if StartTask is not specified - with this request, then the Batch service will remove the existing StartTask. + This fully replaces all the updatable properties of the Pool. 
For example, if the Pool has a + StartTask associated with it and if StartTask is not specified with this request, then the + Batch service will remove the existing StartTask. :param pool_id: The ID of the Pool to update. Required. :type pool_id: str :param pool: The options to use for replacing properties on the pool. Required. :type pool: ~azure.batch.models.BatchPoolReplaceOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -1721,8 +1717,8 @@ async def replace_pool_properties( _request = build_batch_replace_pool_properties_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1765,8 +1761,8 @@ async def _remove_nodes_internal( pool_id: str, remove_options: _models.BatchNodeRemoveOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -1775,22 +1771,22 @@ async def _remove_nodes_internal( ) -> None: """Removes Compute Nodes from the specified Pool. - This operation can only run when the allocation state of the Pool is steady. - When this operation runs, the allocation state changes from steady to resizing. - Each request may remove up to 100 nodes. + This operation can only run when the allocation state of the Pool is steady. When this + operation runs, the allocation state changes from steady to resizing. Each request may remove + up to 100 nodes. :param pool_id: The ID of the Pool to get. Required. :type pool_id: str :param remove_options: The options to use for removing the node. Required. :type remove_options: ~azure.batch.models.BatchNodeRemoveOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
- :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1836,8 +1832,8 @@ async def _remove_nodes_internal( _request = build_batch_remove_nodes_internal_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -1882,8 +1878,8 @@ async def _remove_nodes_internal( def list_supported_images( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any @@ -1892,14 +1888,14 @@ def list_supported_images( Lists all Virtual Machine Images supported by the Azure Batch service. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -1929,8 +1925,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_supported_images_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, api_version=self._config.api_version, @@ -1998,24 +1994,23 @@ async def get_next(next_link=None): def list_pool_node_counts( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, **kwargs: Any ) -> AsyncItemPaged["_models.BatchPoolNodeCounts"]: - """Gets the number of Compute Nodes in each state, grouped by Pool. Note that the - numbers returned may not always be up to date. If you need exact node counts, - use a list query. + """Gets the number of Compute Nodes in each state, grouped by Pool. Note that the numbers returned + may not always be up to date. If you need exact node counts, use a list query. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -2045,8 +2040,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_pool_node_counts_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, api_version=self._config.api_version, @@ -2115,8 +2110,8 @@ async def _delete_job_internal( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -2126,25 +2121,24 @@ async def _delete_job_internal( ) -> None: """Deletes a Job. - Deleting a Job also deletes all Tasks that are part of that Job, and all Job - statistics. This also overrides the retention period for Task data; that is, if - the Job contains Tasks which are still retained on Compute Nodes, the Batch - services deletes those Tasks' working directories and all their contents. When - a Delete Job request is received, the Batch service sets the Job to the - deleting state. All update operations on a Job that is in deleting state will - fail with status code 409 (Conflict), with additional information indicating - that the Job is being deleted. + Deleting a Job also deletes all Tasks that are part of that Job, and all Job statistics. 
This + also overrides the retention period for Task data; that is, if the Job contains Tasks which are + still retained on Compute Nodes, the Batch services deletes those Tasks' working directories + and all their contents. When a Delete Job request is received, the Batch service sets the Job + to the deleting state. All update operations on a Job that is in deleting state will fail with + status code 409 (Conflict), with additional information indicating that the Job is being + deleted. :param job_id: The ID of the Job to delete. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -2188,8 +2182,8 @@ async def _delete_job_internal( _request = build_batch_delete_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -2231,8 +2225,8 @@ async def get_job( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -2247,14 +2241,14 @@ async def get_job( :param job_id: The ID of the Job. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -2299,8 +2293,8 @@ async def get_job( _request = build_batch_get_job_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -2358,8 +2352,8 @@ async def update_job( job_id: str, job: _models.BatchJobUpdateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -2368,22 +2362,22 @@ async def update_job( ) -> None: """Updates the properties of the specified Job. - This replaces only the Job properties specified in the request. For example, if - the Job has constraints, and a request does not specify the constraints - element, then the Job keeps the existing constraints. + This replaces only the Job properties specified in the request. For example, if the Job has + constraints, and a request does not specify the constraints element, then the Job keeps the + existing constraints. :param job_id: The ID of the Job whose properties you want to update. Required. :type job_id: str :param job: The options to use for updating the Job. Required. :type job: ~azure.batch.models.BatchJobUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -2429,8 +2423,8 @@ async def update_job( _request = build_batch_update_job_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -2477,8 +2471,8 @@ async def replace_job( job_id: str, job: _models.BatchJob, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -2487,22 +2481,22 @@ async def replace_job( ) -> None: """Updates the properties of the specified Job. - This fully replaces all the updatable properties of the Job. For example, if - the Job has constraints associated with it and if constraints is not specified - with this request, then the Batch service will remove the existing constraints. + This fully replaces all the updatable properties of the Job. For example, if the Job has + constraints associated with it and if constraints is not specified with this request, then the + Batch service will remove the existing constraints. :param job_id: The ID of the Job whose properties you want to update. Required. :type job_id: str :param job: A job with updated properties. Required. 
:type job: ~azure.batch.models.BatchJob - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -2548,8 +2542,8 @@ async def replace_job( _request = build_batch_replace_job_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -2596,8 +2590,8 @@ async def _disable_job_internal( job_id: str, disable_options: _models.BatchJobDisableOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -2606,27 +2600,26 @@ async def _disable_job_internal( ) -> None: """Disables the specified Job, preventing new Tasks from running. - The Batch Service immediately moves the Job to the disabling state. 
Batch then - uses the disableTasks parameter to determine what to do with the currently - running Tasks of the Job. The Job remains in the disabling state until the - disable operation is completed and all Tasks have been dealt with according to - the disableTasks option; the Job then moves to the disabled state. No new Tasks - are started under the Job until it moves back to active state. If you try to - disable a Job that is in any state other than active, disabling, or disabled, - the request fails with status code 409. + The Batch Service immediately moves the Job to the disabling state. Batch then uses the + disableTasks parameter to determine what to do with the currently running Tasks of the Job. The + Job remains in the disabling state until the disable operation is completed and all Tasks have + been dealt with according to the disableTasks option; the Job then moves to the disabled state. + No new Tasks are started under the Job until it moves back to active state. If you try to + disable a Job that is in any state other than active, disabling, or disabled, the request fails + with status code 409. :param job_id: The ID of the Job to disable. Required. :type job_id: str :param disable_options: The options to use for disabling the Job. Required. :type disable_options: ~azure.batch.models.BatchJobDisableOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. 
Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -2672,8 +2665,8 @@ async def _disable_job_internal( _request = build_batch_disable_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -2719,8 +2712,8 @@ async def _enable_job_internal( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -2729,23 +2722,22 @@ async def _enable_job_internal( ) -> None: """Enables the specified Job, allowing new Tasks to run. - When you call this API, the Batch service sets a disabled Job to the enabling - state. After the this operation is completed, the Job moves to the active - state, and scheduling of new Tasks under the Job resumes. The Batch service - does not allow a Task to remain in the active state for more than 180 days. - Therefore, if you enable a Job containing active Tasks which were added more - than 180 days ago, those Tasks will not run. + When you call this API, the Batch service sets a disabled Job to the enabling state. After the + this operation is completed, the Job moves to the active state, and scheduling of new Tasks + under the Job resumes. The Batch service does not allow a Task to remain in the active state + for more than 180 days. Therefore, if you enable a Job containing active Tasks which were added + more than 180 days ago, those Tasks will not run. 
:param job_id: The ID of the Job to enable. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -2786,8 +2778,8 @@ async def _enable_job_internal( _request = build_batch_enable_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -2832,8 +2824,8 @@ async def _terminate_job_internal( job_id: str, options: Optional[_models.BatchJobTerminateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -2843,25 +2835,24 @@ async def _terminate_job_internal( ) -> None: """Terminates the specified Job, marking it as completed. - When a Terminate Job request is received, the Batch service sets the Job to the - terminating state. 
The Batch service then terminates any running Tasks - associated with the Job and runs any required Job release Tasks. Then the Job - moves into the completed state. If there are any Tasks in the Job in the active - state, they will remain in the active state. Once a Job is terminated, new - Tasks cannot be added and any remaining active Tasks will not be scheduled. + When a Terminate Job request is received, the Batch service sets the Job to the terminating + state. The Batch service then terminates any running Tasks associated with the Job and runs any + required Job release Tasks. Then the Job moves into the completed state. If there are any Tasks + in the Job in the active state, they will remain in the active state. Once a Job is terminated, + new Tasks cannot be added and any remaining active Tasks will not be scheduled. :param job_id: The ID of the Job to terminate. Required. :type job_id: str :param options: The options to use for terminating the Job. Default value is None. :type options: ~azure.batch.models.BatchJobTerminateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -2914,8 +2905,8 @@ async def _terminate_job_internal( _request = build_batch_terminate_job_internal_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -2962,32 +2953,30 @@ async def create_job( self, job: _models.BatchJobCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Job to the specified Account. - The Batch service supports two ways to control the work done as part of a Job. - In the first approach, the user specifies a Job Manager Task. The Batch service - launches this Task when it is ready to start the Job. The Job Manager Task - controls all other Tasks that run under this Job, by using the Task APIs. In - the second approach, the user directly controls the execution of Tasks under an - active Job, by using the Task APIs. Also note: when naming Jobs, avoid - including sensitive information such as user names or secret project names. - This information may appear in telemetry logs accessible to Microsoft Support - engineers. + The Batch service supports two ways to control the work done as part of a Job. In the first + approach, the user specifies a Job Manager Task. The Batch service launches this Task when it + is ready to start the Job. The Job Manager Task controls all other Tasks that run under this + Job, by using the Task APIs. In the second approach, the user directly controls the execution + of Tasks under an active Job, by using the Task APIs. Also note: when naming Jobs, avoid + including sensitive information such as user names or secret project names. This information + may appear in telemetry logs accessible to Microsoft Support engineers. 
:param job: The Job to be created. Required. :type job: ~azure.batch.models.BatchJobCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -3011,8 +3000,8 @@ async def create_job( _content = json.dumps(job, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3053,8 +3042,8 @@ async def create_job( def list_jobs( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -3065,14 +3054,14 @@ def list_jobs( Lists all of the Jobs in the specified Account. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -3106,8 +3095,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_jobs_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -3178,8 +3167,8 @@ def list_jobs_from_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -3193,14 +3182,14 @@ def list_jobs_from_schedule( :param job_schedule_id: The ID of the Job Schedule from which you want to get a list of Jobs. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -3235,8 +3224,8 @@ def prepare_request(next_link=None): _request = build_batch_list_jobs_from_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -3307,33 +3296,32 @@ def list_job_preparation_and_release_task_status( # pylint: disable=name-too-lo self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> AsyncItemPaged["_models.BatchJobPreparationAndReleaseTaskStatus"]: - """Lists the execution status of the Job Preparation and Job Release Task for the - specified Job across the Compute Nodes where the Job has run. - - This API returns the Job Preparation and Job Release Task status on all Compute - Nodes that have run the Job Preparation or Job Release Task. This includes - Compute Nodes which have since been removed from the Pool. 
If this API is - invoked on a Job which has no Job Preparation or Job Release Task, the Batch - service returns HTTP status code 409 (Conflict) with an error code of + """Lists the execution status of the Job Preparation and Job Release Task for the specified Job + across the Compute Nodes where the Job has run. + + This API returns the Job Preparation and Job Release Task status on all Compute Nodes that have + run the Job Preparation or Job Release Task. This includes Compute Nodes which have since been + removed from the Pool. If this API is invoked on a Job which has no Job Preparation or Job + Release Task, the Batch service returns HTTP status code 409 (Conflict) with an error code of JobPreparationTaskNotSpecified. :param job_id: The ID of the Job. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -3367,8 +3355,8 @@ def prepare_request(next_link=None): _request = build_batch_list_job_preparation_and_release_task_status_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -3437,25 +3425,30 @@ async def get_next(next_link=None): @distributed_trace_async async def get_job_task_counts( - self, job_id: str, *, timeout: Optional[int] = None, ocpdate: Optional[datetime.datetime] = None, **kwargs: Any + self, + job_id: str, + *, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, + **kwargs: Any ) -> _models.BatchTaskCountsResult: """Gets the Task counts for the specified Job. - Task counts provide a count of the Tasks by active, running or completed Task - state, and a count of Tasks which succeeded or failed. Tasks in the preparing - state are counted as running. Note that the numbers returned may not always be - up to date. If you need exact task counts, use a list query. + Task counts provide a count of the Tasks by active, running or completed Task state, and a + count of Tasks which succeeded or failed. Tasks in the preparing state are counted as running. + Note that the numbers returned may not always be up to date. If you need exact task counts, use + a list query. :param job_id: The ID of the Job. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchTaskCountsResult. The BatchTaskCountsResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTaskCountsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -3475,8 +3468,8 @@ async def get_job_task_counts( _request = build_batch_get_job_task_counts_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3527,8 +3520,8 @@ async def job_schedule_exists( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -3541,14 +3534,14 @@ async def job_schedule_exists( :param job_schedule_id: The ID of the Job Schedule which you want to check. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -3589,8 +3582,8 @@ async def job_schedule_exists( _request = build_batch_job_schedule_exists_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -3635,8 +3628,8 @@ async def _delete_job_schedule_internal( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -3646,22 +3639,22 @@ async def _delete_job_schedule_internal( ) -> None: """Deletes a Job Schedule from the specified Account. - When you delete a Job Schedule, this also deletes all Jobs and Tasks under that - schedule. When Tasks are deleted, all the files in their working directories on - the Compute Nodes are also deleted (the retention period is ignored). The Job - Schedule statistics are no longer accessible once the Job Schedule is deleted, - though they are still counted towards Account lifetime statistics. + When you delete a Job Schedule, this also deletes all Jobs and Tasks under that schedule. When + Tasks are deleted, all the files in their working directories on the Compute Nodes are also + deleted (the retention period is ignored). The Job Schedule statistics are no longer accessible + once the Job Schedule is deleted, though they are still counted towards Account lifetime + statistics. :param job_schedule_id: The ID of the Job Schedule to delete. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -3705,8 +3698,8 @@ async def _delete_job_schedule_internal( _request = build_batch_delete_job_schedule_internal_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -3748,8 +3741,8 @@ async def get_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -3762,14 +3755,14 @@ async def get_job_schedule( :param job_schedule_id: The ID of the Job Schedule to get. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -3814,8 +3807,8 @@ async def get_job_schedule( _request = build_batch_get_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -3873,8 +3866,8 @@ async def update_job_schedule( job_schedule_id: str, job_schedule: _models.BatchJobScheduleUpdateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -3883,24 +3876,23 @@ async def update_job_schedule( ) -> None: """Updates the properties of the specified Job Schedule. - This replaces only the Job Schedule properties specified in the request. 
For - example, if the schedule property is not specified with this request, then the - Batch service will keep the existing schedule. Changes to a Job Schedule only - impact Jobs created by the schedule after the update has taken place; currently - running Jobs are unaffected. + This replaces only the Job Schedule properties specified in the request. For example, if the + schedule property is not specified with this request, then the Batch service will keep the + existing schedule. Changes to a Job Schedule only impact Jobs created by the schedule after the + update has taken place; currently running Jobs are unaffected. :param job_schedule_id: The ID of the Job Schedule to update. Required. :type job_schedule_id: str :param job_schedule: The options to use for updating the Job Schedule. Required. :type job_schedule: ~azure.batch.models.BatchJobScheduleUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -3946,8 +3938,8 @@ async def update_job_schedule( _request = build_batch_update_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -3994,8 +3986,8 @@ async def replace_job_schedule( job_schedule_id: str, job_schedule: _models.BatchJobSchedule, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4004,24 +3996,23 @@ async def replace_job_schedule( ) -> None: """Updates the properties of the specified Job Schedule. - This fully replaces all the updatable properties of the Job Schedule. For - example, if the schedule property is not specified with this request, then the - Batch service will remove the existing schedule. Changes to a Job Schedule only - impact Jobs created by the schedule after the update has taken place; currently - running Jobs are unaffected. + This fully replaces all the updatable properties of the Job Schedule. For example, if the + schedule property is not specified with this request, then the Batch service will remove the + existing schedule. Changes to a Job Schedule only impact Jobs created by the schedule after the + update has taken place; currently running Jobs are unaffected. :param job_schedule_id: The ID of the Job Schedule to update. Required. :type job_schedule_id: str :param job_schedule: A Job Schedule with updated properties. Required. 
:type job_schedule: ~azure.batch.models.BatchJobSchedule - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -4067,8 +4058,8 @@ async def replace_job_schedule( _request = build_batch_replace_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4114,8 +4105,8 @@ async def disable_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4128,14 +4119,14 @@ async def disable_job_schedule( :param job_schedule_id: The ID of the Job Schedule to disable. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -4176,8 +4167,8 @@ async def disable_job_schedule( _request = build_batch_disable_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4221,8 +4212,8 @@ async def enable_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4235,14 +4226,14 @@ async def enable_job_schedule( :param job_schedule_id: The ID of the Job Schedule to enable. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -4283,8 +4274,8 @@ async def enable_job_schedule( _request = build_batch_enable_job_schedule_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -4328,8 +4319,8 @@ async def _terminate_job_schedule_internal( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -4343,14 +4334,14 @@ async def _terminate_job_schedule_internal( :param job_schedule_id: The ID of the Job Schedule to terminates. Required. 
:type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -4395,8 +4386,8 @@ async def _terminate_job_schedule_internal( _request = build_batch_terminate_job_schedule_internal_request( job_schedule_id=job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -4441,8 +4432,8 @@ async def create_job_schedule( self, job_schedule: _models.BatchJobScheduleCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Job Schedule to the specified Account. @@ -4451,14 +4442,14 @@ async def create_job_schedule( :param job_schedule: The Job Schedule to be created. Required. 
:type job_schedule: ~azure.batch.models.BatchJobScheduleCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -4482,8 +4473,8 @@ async def create_job_schedule( _content = json.dumps(job_schedule, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore _request = build_batch_create_job_schedule_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4524,8 +4515,8 @@ async def create_job_schedule( def list_job_schedules( self, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -4536,14 +4527,14 @@ def list_job_schedules( Lists all of the Job Schedules in the specified Account. - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -4577,8 +4568,8 @@ def prepare_request(next_link=None): if not next_link: _request = build_batch_list_job_schedules_request( - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -4650,28 +4641,28 @@ async def create_task( job_id: str, task: _models.BatchTaskCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Creates a Task to the specified Job. - The maximum lifetime of a Task from addition to completion is 180 days. If a - Task has not completed within 180 days of being added it will be terminated by - the Batch service and left in whatever state it was in at that time. + The maximum lifetime of a Task from addition to completion is 180 days. If a Task has not + completed within 180 days of being added it will be terminated by the Batch service and left in + whatever state it was in at that time. :param job_id: The ID of the Job to which the Task is to be created. Required. :type job_id: str :param task: The Task to be created. Required. 
:type task: ~azure.batch.models.BatchTaskCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -4696,8 +4687,8 @@ async def create_task( _request = build_batch_create_task_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4739,8 +4730,8 @@ def list_tasks( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -4749,20 +4740,19 @@ def list_tasks( ) -> AsyncItemPaged["_models.BatchTask"]: """Lists all of the Tasks that are associated with the specified Job. - For multi-instance Tasks, information such as affinityId, executionInfo and - nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - information about subtasks. + For multi-instance Tasks, information such as affinityId, executionInfo and nodeInfo refer to + the primary Task. 
Use the list subtasks API to retrieve information about subtasks. :param job_id: The ID of the Job. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -4797,8 +4787,8 @@ def prepare_request(next_link=None): _request = build_batch_list_tasks_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -4870,39 +4860,37 @@ async def create_task_collection( job_id: str, task_collection: _models.BatchTaskGroup, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchCreateTaskCollectionResult: """Adds a collection of Tasks to the specified Job. - Note that each Task must have a unique ID. The Batch service may not return the - results for each Task in the same order the Tasks were submitted in this - request. 
If the server times out or the connection is closed during the - request, the request may have been partially or fully processed, or not at all. - In such cases, the user should re-issue the request. Note that it is up to the - user to correctly handle failures when re-issuing a request. For example, you - should use the same Task IDs during a retry so that if the prior operation - succeeded, the retry will not create extra Tasks unexpectedly. If the response - contains any Tasks which failed to add, a client can retry the request. In a - retry, it is most efficient to resubmit only Tasks that failed to add, and to - omit Tasks that were successfully added on the first attempt. The maximum - lifetime of a Task from addition to completion is 180 days. If a Task has not - completed within 180 days of being added it will be terminated by the Batch - service and left in whatever state it was in at that time. + Note that each Task must have a unique ID. The Batch service may not return the results for + each Task in the same order the Tasks were submitted in this request. If the server times out + or the connection is closed during the request, the request may have been partially or fully + processed, or not at all. In such cases, the user should re-issue the request. Note that it is + up to the user to correctly handle failures when re-issuing a request. For example, you should + use the same Task IDs during a retry so that if the prior operation succeeded, the retry will + not create extra Tasks unexpectedly. If the response contains any Tasks which failed to add, a + client can retry the request. In a retry, it is most efficient to resubmit only Tasks that + failed to add, and to omit Tasks that were successfully added on the first attempt. The maximum + lifetime of a Task from addition to completion is 180 days. If a Task has not completed within + 180 days of being added it will be terminated by the Batch service and left in whatever state + it was in at that time. 
:param job_id: The ID of the Job to which the Task collection is to be added. Required. :type job_id: str :param task_collection: The Tasks to be added. Required. :type task_collection: ~azure.batch.models.BatchTaskGroup - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchCreateTaskCollectionResult. The BatchCreateTaskCollectionResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchCreateTaskCollectionResult @@ -4928,8 +4916,8 @@ async def create_task_collection( _request = build_batch_create_task_collection_request( job_id=job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4983,8 +4971,8 @@ async def delete_task( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -4993,24 +4981,23 @@ async def delete_task( ) -> None: """Deletes a Task from the specified Job. 
- When a Task is deleted, all of the files in its directory on the Compute Node - where it ran are also deleted (regardless of the retention time). For - multi-instance Tasks, the delete Task operation applies synchronously to the - primary task; subtasks and their files are then deleted asynchronously in the - background. + When a Task is deleted, all of the files in its directory on the Compute Node where it ran are + also deleted (regardless of the retention time). For multi-instance Tasks, the delete Task + operation applies synchronously to the primary task; subtasks and their files are then deleted + asynchronously in the background. :param job_id: The ID of the Job from which to delete the Task. Required. :type job_id: str :param task_id: The ID of the Task to delete. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5052,8 +5039,8 @@ async def delete_task( _request = build_batch_delete_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5095,8 +5082,8 @@ async def get_task( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, @@ -5107,22 +5094,21 @@ async def get_task( ) -> _models.BatchTask: """Gets information about the specified Task. - For multi-instance Tasks, information such as affinityId, executionInfo and - nodeInfo refer to the primary Task. Use the list subtasks API to retrieve - information about subtasks. + For multi-instance Tasks, information such as affinityId, executionInfo and nodeInfo refer to + the primary Task. Use the list subtasks API to retrieve information about subtasks. :param job_id: The ID of the Job that contains the Task. Required. :type job_id: str :param task_id: The ID of the Task to get information about. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -5168,8 +5154,8 @@ async def get_task( _request = build_batch_get_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, select=select, @@ -5229,8 +5215,8 @@ async def replace_task( task_id: str, task: _models.BatchTask, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -5245,14 +5231,14 @@ async def replace_task( :type task_id: str :param task: The Task to update. Required. :type task: ~azure.batch.models.BatchTask - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -5299,8 +5285,8 @@ async def replace_task( _request = build_batch_replace_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5347,13 +5333,12 @@ def list_sub_tasks( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> AsyncItemPaged["_models.BatchSubtask"]: - """Lists all of the subtasks that are associated with the specified multi-instance - Task. + """Lists all of the subtasks that are associated with the specified multi-instance Task. If the Task is not a multi-instance Task then this returns an empty collection. @@ -5361,14 +5346,14 @@ def list_sub_tasks( :type job_id: str :param task_id: The ID of the Task. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] :return: An iterator like instance of BatchSubtask @@ -5394,8 +5379,8 @@ def prepare_request(next_link=None): _request = build_batch_list_sub_tasks_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, select=select, api_version=self._config.api_version, headers=_headers, @@ -5464,8 +5449,8 @@ async def terminate_task( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -5474,22 +5459,22 @@ async def terminate_task( ) -> None: """Terminates the specified Task. - When the Task has been terminated, it moves to the completed state. For - multi-instance Tasks, the terminate Task operation applies synchronously to the - primary task; subtasks are then terminated asynchronously in the background. + When the Task has been terminated, it moves to the completed state. For multi-instance Tasks, + the terminate Task operation applies synchronously to the primary task; subtasks are then + terminated asynchronously in the background. :param job_id: The ID of the Job containing the Task. Required. :type job_id: str :param task_id: The ID of the Task to terminate. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
- :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -5531,8 +5516,8 @@ async def terminate_task( _request = build_batch_terminate_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5577,37 +5562,35 @@ async def reactivate_task( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> None: - """Reactivates a Task, allowing it to run again even if its retry count has been - exhausted. + """Reactivates a Task, allowing it to run again even if its retry count has been exhausted. - Reactivation makes a Task eligible to be retried again up to its maximum retry - count. The Task's state is changed to active. As the Task is no longer in the - completed state, any previous exit code or failure information is no longer - available after reactivation. Each time a Task is reactivated, its retry count - is reset to 0. 
Reactivation will fail for Tasks that are not completed or that - previously completed successfully (with an exit code of 0). Additionally, it - will fail if the Job has completed (or is terminating or deleting). + Reactivation makes a Task eligible to be retried again up to its maximum retry count. The + Task's state is changed to active. As the Task is no longer in the completed state, any + previous exit code or failure information is no longer available after reactivation. Each time + a Task is reactivated, its retry count is reset to 0. Reactivation will fail for Tasks that are + not completed or that previously completed successfully (with an exit code of 0). Additionally, + it will fail if the Job has completed (or is terminating or deleting). :param job_id: The ID of the Job containing the Task. Required. :type job_id: str :param task_id: The ID of the Task to reactivate. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5649,8 +5632,8 @@ async def reactivate_task( _request = build_batch_reactivate_task_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -5696,8 +5679,8 @@ async def delete_task_file( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> None: @@ -5711,14 +5694,14 @@ async def delete_task_file( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword recursive: Whether to delete children of a directory. 
If the filePath parameter represents a directory instead of a file, you can set recursive to true to delete the @@ -5746,8 +5729,8 @@ async def delete_task_file( job_id=job_id, task_id=task_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, recursive=recursive, api_version=self._config.api_version, headers=_headers, @@ -5787,8 +5770,8 @@ async def get_task_file( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -5802,14 +5785,14 @@ async def get_task_file( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5845,8 +5828,8 @@ async def get_task_file( job_id=job_id, task_id=task_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, ocp_range=ocp_range, @@ -5907,8 +5890,8 @@ async def _get_task_file_properties_internal( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -5921,14 +5904,14 @@ async def _get_task_file_properties_internal( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -5960,8 +5943,8 @@ async def _get_task_file_properties_internal( job_id=job_id, task_id=task_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, api_version=self._config.api_version, @@ -6011,8 +5994,8 @@ def list_task_files( job_id: str, task_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, @@ -6026,14 +6009,14 @@ def list_task_files( :type job_id: str :param task_id: The ID of the Task whose files you want to list. Required. :type task_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -6069,8 +6052,8 @@ def prepare_request(next_link=None): _request = build_batch_list_task_files_request( job_id=job_id, task_id=task_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, recursive=recursive, @@ -6142,14 +6125,17 @@ async def create_node_user( node_id: str, user: _models.BatchNodeUserCreateOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Adds a user Account to the specified Compute Node. - You can add a user Account to a Compute Node only when it is in the idle or - running state. + You can add a user Account to a Compute Node only when it is in the idle or running state. + Before you can remotely login to a Compute Node you must configure access ports for SSH and + RDP. For more information, see + `https://learn.microsoft.com/azure/batch/pool-endpoint-configuration + `_. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -6157,14 +6143,14 @@ async def create_node_user( :type node_id: str :param user: The options to use for creating the user. Required. :type user: ~azure.batch.models.BatchNodeUserCreateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6190,8 +6176,8 @@ async def create_node_user( _request = build_batch_create_node_user_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -6235,14 +6221,17 @@ async def delete_node_user( node_id: str, user_name: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Deletes a user Account from the specified Compute Node. - You can delete a user Account to a Compute Node only when it is in the idle or - running state. + You can delete a user Account to a Compute Node only when it is in the idle or running state. + Before you can remotely login to a Compute Node you must configure access ports for SSH and + RDP. For more information, see + `https://learn.microsoft.com/azure/batch/pool-endpoint-configuration + `_. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -6250,14 +6239,14 @@ async def delete_node_user( :type node_id: str :param user_name: The name of the user Account to delete. Required. :type user_name: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. 
- :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6279,8 +6268,8 @@ async def delete_node_user( pool_id=pool_id, node_id=node_id, user_name=user_name, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6318,18 +6307,18 @@ async def replace_node_user( pool_id: str, node_id: str, user_name: str, - update_options: _models.BatchNodeUserUpdateOptions, + update_options: _models.BatchNodeUserReplaceOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Updates the password and expiration time of a user Account on the specified Compute Node. - This operation replaces of all the updatable properties of the Account. For - example, if the expiryTime element is not specified, the current value is - replaced with the default value, not left unmodified. You can update a user - Account on a Compute Node only when it is in the idle or running state. + This operation replaces of all the updatable properties of the Account. For example, if the + expiryTime element is not specified, the current value is replaced with the default value, not + left unmodified. You can update a user Account on a Compute Node only when it is in the idle or + running state. :param pool_id: The ID of the Pool that contains the Compute Node. Required. 
:type pool_id: str @@ -6338,15 +6327,15 @@ async def replace_node_user( :param user_name: The name of the user Account to update. Required. :type user_name: str :param update_options: The options to use for updating the user. Required. - :type update_options: ~azure.batch.models.BatchNodeUserUpdateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :type update_options: ~azure.batch.models.BatchNodeUserReplaceOptions + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6373,8 +6362,8 @@ async def replace_node_user( pool_id=pool_id, node_id=node_id, user_name=user_name, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -6417,8 +6406,8 @@ async def get_node( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> _models.BatchNode: @@ -6430,14 +6419,14 @@ async def get_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to get information about. Required. 
:type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] :return: BatchNode. The BatchNode is compatible with MutableMapping @@ -6460,8 +6449,8 @@ async def get_node( _request = build_batch_get_node_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, select=select, api_version=self._config.api_version, headers=_headers, @@ -6515,8 +6504,8 @@ async def _reboot_node_internal( node_id: str, options: Optional[_models.BatchNodeRebootOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Restarts the specified Compute Node. @@ -6529,14 +6518,14 @@ async def _reboot_node_internal( :type node_id: str :param options: The options to use for rebooting the Compute Node. Default value is None. 
:type options: ~azure.batch.models.BatchNodeRebootOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6566,8 +6555,8 @@ async def _reboot_node_internal( _request = build_batch_reboot_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -6610,8 +6599,8 @@ async def _start_node_internal( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Starts the specified Compute Node. @@ -6622,14 +6611,14 @@ async def _start_node_internal( :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6650,8 +6639,8 @@ async def _start_node_internal( _request = build_batch_start_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6693,15 +6682,15 @@ async def _reimage_node_internal( node_id: str, options: Optional[_models.BatchNodeReimageOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Reinstalls the operating system on the specified Compute Node. - You can reinstall the operating system on a Compute Node only if it is in an - idle or running state. This API can be invoked only on Pools created with the - cloud service configuration property. + You can reinstall the operating system on a Compute Node only if it is in an idle or running + state. This API can be invoked only on Pools created with the cloud service configuration + property. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -6709,14 +6698,14 @@ async def _reimage_node_internal( :type node_id: str :param options: The options to use for reimaging the Compute Node. Default value is None. 
:type options: ~azure.batch.models.BatchNodeReimageOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6746,8 +6735,8 @@ async def _reimage_node_internal( _request = build_batch_reimage_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -6791,8 +6780,8 @@ async def _deallocate_node_internal( node_id: str, options: Optional[_models.BatchNodeDeallocateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Deallocates the specified Compute Node. @@ -6805,14 +6794,14 @@ async def _deallocate_node_internal( :type node_id: str :param options: The options to use for deallocating the Compute Node. Default value is None. 
:type options: ~azure.batch.models.BatchNodeDeallocateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6842,8 +6831,8 @@ async def _deallocate_node_internal( _request = build_batch_deallocate_node_internal_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -6887,14 +6876,14 @@ async def disable_node_scheduling( node_id: str, options: Optional[_models.BatchNodeDisableSchedulingOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Disables Task scheduling on the specified Compute Node. - You can disable Task scheduling on a Compute Node only if its current - scheduling state is enabled. + You can disable Task scheduling on a Compute Node only if its current scheduling state is + enabled. :param pool_id: The ID of the Pool that contains the Compute Node. Required. 
:type pool_id: str @@ -6904,14 +6893,14 @@ async def disable_node_scheduling( :param options: The options to use for disabling scheduling on the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeDisableSchedulingOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -6941,8 +6930,8 @@ async def disable_node_scheduling( _request = build_batch_disable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -6985,28 +6974,28 @@ async def enable_node_scheduling( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> None: """Enables Task scheduling on the specified Compute Node. - You can enable Task scheduling on a Compute Node only if its current scheduling - state is disabled. + You can enable Task scheduling on a Compute Node only if its current scheduling state is + disabled. 
:param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str :param node_id: The ID of the Compute Node on which you want to enable Task scheduling. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -7027,8 +7016,8 @@ async def enable_node_scheduling( _request = build_batch_enable_node_scheduling_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -7069,28 +7058,30 @@ async def get_node_remote_login_settings( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchNodeRemoteLoginSettings: """Gets the settings required for remote login to a Compute Node. - Before you can remotely login to a Compute Node using the remote login settings, - you must create a user Account on the Compute Node. 
+ Before you can remotely login to a Compute Node using the remote login settings, you must + create a user Account on the Compute Node and configure access ports for SSH and RDP. For more + information, see `https://learn.microsoft.com/azure/batch/pool-endpoint-configuration + `_. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str :param node_id: The ID of the Compute Node for which to obtain the remote login settings. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchNodeRemoteLoginSettings. 
The BatchNodeRemoteLoginSettings is compatible with MutableMapping :rtype: ~azure.batch.models.BatchNodeRemoteLoginSettings @@ -7112,8 +7103,8 @@ async def get_node_remote_login_settings( _request = build_batch_get_node_remote_login_settings_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, api_version=self._config.api_version, headers=_headers, params=_params, @@ -7166,17 +7157,16 @@ async def upload_node_logs( node_id: str, upload_options: _models.UploadBatchServiceLogsOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.UploadBatchServiceLogsResult: - """Upload Azure Batch service log files from the specified Compute Node to Azure - Blob Storage. + """Upload Azure Batch service log files from the specified Compute Node to Azure Blob Storage. - This is for gathering Azure Batch service log files in an automated fashion - from Compute Nodes if you are experiencing an error and wish to escalate to - Azure support. The Azure Batch service log files should be shared with Azure - support to aid in debugging issues with the Batch service. + This is for gathering Azure Batch service log files in an automated fashion from Compute Nodes + if you are experiencing an error and wish to escalate to Azure support. The Azure Batch service + log files should be shared with Azure support to aid in debugging issues with the Batch + service. :param pool_id: The ID of the Pool that contains the Compute Node. Required. :type pool_id: str @@ -7185,14 +7175,14 @@ async def upload_node_logs( :type node_id: str :param upload_options: The Azure Batch service log files upload options. Required. 
:type upload_options: ~azure.batch.models.UploadBatchServiceLogsOptions - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: UploadBatchServiceLogsResult. The UploadBatchServiceLogsResult is compatible with MutableMapping :rtype: ~azure.batch.models.UploadBatchServiceLogsResult @@ -7219,8 +7209,8 @@ async def upload_node_logs( _request = build_batch_upload_node_logs_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, content_type=content_type, api_version=self._config.api_version, content=_content, @@ -7273,8 +7263,8 @@ def list_nodes( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, select: Optional[list[str]] = None, @@ -7286,14 +7276,14 @@ def list_nodes( :param pool_id: The ID of the Pool from which you want to list Compute Nodes. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -7326,8 +7316,8 @@ def prepare_request(next_link=None): _request = build_batch_list_nodes_request( pool_id=pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, select=select, @@ -7399,8 +7389,8 @@ async def get_node_extension( node_id: str, extension_name: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, select: Optional[list[str]] = None, **kwargs: Any ) -> _models.BatchNodeVMExtension: @@ -7415,14 +7405,14 @@ async def get_node_extension( :param extension_name: The name of the Compute Node Extension that you want to get information about. Required. :type extension_name: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword select: An OData $select clause. Default value is None. :paramtype select: list[str] :return: BatchNodeVMExtension. The BatchNodeVMExtension is compatible with MutableMapping @@ -7446,8 +7436,8 @@ async def get_node_extension( pool_id=pool_id, node_id=node_id, extension_name=extension_name, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, select=select, api_version=self._config.api_version, headers=_headers, @@ -7500,8 +7490,8 @@ def list_node_extensions( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, select: Optional[list[str]] = None, **kwargs: Any @@ -7514,14 +7504,14 @@ def list_node_extensions( :type pool_id: str :param node_id: The ID of the Compute Node that you want to list extensions. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. 
Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. :paramtype max_results: int @@ -7550,8 +7540,8 @@ def prepare_request(next_link=None): _request = build_batch_list_node_extensions_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, select=select, api_version=self._config.api_version, @@ -7622,8 +7612,8 @@ async def delete_node_file( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, recursive: Optional[bool] = None, **kwargs: Any ) -> None: @@ -7637,14 +7627,14 @@ async def delete_node_file( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword recursive: Whether to delete children of a directory. 
If the filePath parameter represents a directory instead of a file, you can set recursive to true to delete the @@ -7672,8 +7662,8 @@ async def delete_node_file( pool_id=pool_id, node_id=node_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, recursive=recursive, api_version=self._config.api_version, headers=_headers, @@ -7713,8 +7703,8 @@ async def get_node_file( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -7728,14 +7718,14 @@ async def get_node_file( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -7771,8 +7761,8 @@ async def get_node_file( pool_id=pool_id, node_id=node_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, ocp_range=ocp_range, @@ -7833,8 +7823,8 @@ async def _get_node_file_properties_internal( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -7847,14 +7837,14 @@ async def _get_node_file_properties_internal( :type node_id: str :param file_path: The path to the file or directory. Required. :type file_path: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -7886,8 +7876,8 @@ async def _get_node_file_properties_internal( pool_id=pool_id, node_id=node_id, file_path=file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, api_version=self._config.api_version, @@ -7937,8 +7927,8 @@ def list_node_files( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, max_results: Optional[int] = None, filter: Optional[str] = None, recursive: Optional[bool] = None, @@ -7952,14 +7942,14 @@ def list_node_files( :type pool_id: str :param node_id: The ID of the Compute Node whose files you want to list. Required. :type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in + :keyword service_timeout: The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword max_results: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. 
:paramtype max_results: int @@ -7993,8 +7983,8 @@ def prepare_request(next_link=None): _request = build_batch_list_node_files_request( pool_id=pool_id, node_id=node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, max_results=max_results, filter=filter, recursive=recursive, diff --git a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py index 8865228949e2..653f78580194 100644 --- a/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/aio/_operations/_patch.py @@ -63,8 +63,8 @@ async def begin_delete_job( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -86,14 +86,14 @@ async def begin_delete_job( :param job_id: The ID of the Job to delete. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + delete the Job, in seconds. The default is 30 seconds. If the value is larger than 30, the + default will be used instead. Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time. Default value is None. @@ -125,8 +125,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, await self._delete_job_internal( job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -138,7 +138,6 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header ) polling_method = DeleteJobPollingMethodAsync(self, pipeline_response, None, job_id, polling_interval) - # redundant but needed to fix pylint errors in the polling method code return AsyncLROPoller(self, pipeline_response, lambda _: None, polling_method, **kwargs) @distributed_trace @@ -147,8 +146,8 @@ async def begin_disable_job( job_id: str, disable_options: _models.BatchJobDisableOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -171,14 +170,14 @@ async def begin_disable_job( :type job_id: str :param disable_options: The options to use for disabling the Job. Required. :type disable_options: ~azure.batch.models.BatchJobDisableOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + disable the Job, in seconds. The default is 30 seconds. If the value is larger than 30, the + default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -209,8 +208,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header await self._disable_job_internal( job_id, disable_options=disable_options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -228,8 +227,8 @@ async def begin_enable_job( self, job_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -247,14 +246,14 @@ async def begin_enable_job( :param job_id: The ID of the Job to enable. Required. :type job_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + enable the Job, in seconds. The default is 30 seconds. If the value is larger than 30, the + default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -284,8 +283,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, await self._enable_job_internal( job_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -303,8 +302,8 @@ async def begin_delete_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -323,14 +322,14 @@ async def begin_delete_job_schedule( :param job_schedule_id: The ID of the Job Schedule to delete. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + delete the Job Schedule, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead. Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has been modified since the specified time. Default value is None. @@ -358,8 +357,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, await self._delete_job_schedule_internal( job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -379,8 +378,8 @@ async def begin_delete_pool( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -405,14 +404,14 @@ async def begin_delete_pool( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. 
- :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to delete the Pool, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -442,8 +441,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, await self._delete_pool_internal( pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -463,8 +462,8 @@ async def begin_deallocate_node( node_id: str, options: Optional[_models.BatchNodeDeallocateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> AsyncLROPoller[None]: @@ -478,16 +477,16 @@ async def begin_deallocate_node( :type node_id: str :param options: The options to use for deallocating the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeDeallocateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + deallocate the Compute Node, in seconds. The default is 30 seconds. If the value is larger + than 30, the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int - :paramtype ocpdate: ~datetime.datetime :return: None :rtype: None :raises ~azure.core.exceptions.HttpResponseError: @@ -502,8 +501,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header pool_id, node_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -521,8 +520,8 @@ async def begin_reboot_node( node_id: str, options: Optional[_models.BatchNodeRebootOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> AsyncLROPoller[None]: @@ -536,14 +535,14 @@ async def begin_reboot_node( :type node_id: str :param options: The options to use for rebooting the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeRebootOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + reboot the Compute Node, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int :return: None @@ -560,8 +559,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header pool_id, node_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -577,8 +576,8 @@ async def begin_reimage_node( node_id: str, options: Optional[_models.BatchNodeReimageOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> AsyncLROPoller[None]: @@ -596,14 +595,14 @@ async def begin_reimage_node( :type node_id: str :param options: The options to use for reimaging the Compute Node. Default value is None. :type options: ~azure.batch.models.BatchNodeReimageOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + reimage the Compute Node, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int :return: None @@ -620,8 +619,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header pool_id, node_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -638,8 +637,8 @@ async def begin_remove_nodes( pool_id: str, remove_options: _models.BatchNodeRemoveOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -657,14 +656,14 @@ async def begin_remove_nodes( :type pool_id: str :param remove_options: The options to use for removing the node. Required. :type remove_options: ~azure.batch.models.BatchNodeRemoveOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + remove the Compute Node, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -695,8 +694,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header await self._remove_nodes_internal( pool_id, remove_options=remove_options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -715,8 +714,8 @@ async def begin_resize_pool( pool_id: str, resize_options: _models.BatchPoolResizeOptions, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -738,14 +737,14 @@ async def begin_resize_pool( :type pool_id: str :param resize_options: The options to use for resizing the pool. Required. 
:type resize_options: ~azure.batch.models.BatchPoolResizeOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to resize the Pool, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -776,8 +775,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header await self._resize_pool_internal( pool_id, resize_options=resize_options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -796,8 +795,8 @@ async def begin_start_node( pool_id: str, node_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, polling_interval: int = 5, **kwargs: Any ) -> AsyncLROPoller[None]: @@ -809,14 +808,14 @@ async def begin_start_node( :type pool_id: str :param node_id: The ID of the Compute Node that you want to restart. Required. 
:type node_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + start the Compute Node, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword polling_interval: The interval in seconds between polling attempts. Default value is 5. :paramtype polling_interval: int :return: None @@ -832,8 +831,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header await self._start_node_internal( pool_id, node_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, cls=capture_pipeline_response, **kwargs, ), @@ -847,8 +846,8 @@ async def begin_stop_pool_resize( self, pool_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, etag: Optional[str] = None, @@ -868,14 +867,14 @@ async def begin_stop_pool_resize( :param pool_id: The ID of the Pool to get. Required. :type pool_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to stop the Pool resize, in seconds. The default is 30 seconds. If the value is larger than + 30, the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -905,8 +904,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, await self._stop_pool_resize_internal( pool_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, etag=etag, @@ -925,8 +924,8 @@ async def begin_terminate_job( job_id: str, options: Optional[_models.BatchJobTerminateOptions] = None, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -948,14 +947,14 @@ async def begin_terminate_job( :type job_id: str :param options: The options to use for terminating the Job. Default value is None. 
:type options: ~azure.batch.models.BatchJobTerminateOptions - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request + to terminate the Job, in seconds. The default is 30 seconds. If the value is larger than 30, + the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -989,8 +988,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header await self._terminate_job_internal( job_id, options=options, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -1009,8 +1008,8 @@ async def begin_terminate_job_schedule( self, job_schedule_id: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, force: Optional[bool] = None, @@ -1023,14 +1022,14 @@ async def begin_terminate_job_schedule( :param job_schedule_id: The ID of the Job Schedule to terminates. Required. :type job_schedule_id: str - :keyword timeout: The maximum time that the server can spend processing the request, in - seconds. The default is 30 seconds. If the value is larger than 30, the default will be used - instead.". Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :keyword service_timeout: The maximum time that the server can spend processing the initial request to + terminate the Job Schedule, in seconds. The default is 30 seconds. If the value is larger than + 30, the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. 
- :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1064,8 +1063,8 @@ def capture_pipeline_response(pipeline_response, _deserialized, _response_header PipelineResponse, await self._terminate_job_schedule_internal( job_schedule_id, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, force=force, @@ -1087,10 +1086,10 @@ async def create_tasks( self, job_id: str, task_collection: List[_models.BatchTaskCreateOptions], - concurrencies: int = 0, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + max_concurrency: int = 0, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, **kwargs: Any ) -> _models.BatchCreateTaskCollectionResult: """Adds a collection of Tasks to the specified Job. @@ -1114,33 +1113,34 @@ async def create_tasks( :type job_id: str :param task_collection: The Tasks to be added. Required. :type task_collection: ~azure.batch.models.BatchTaskAddCollectionResult - :param concurrencies: number of coroutines to use in parallel when adding tasks. If specified + :keyword max_concurrency: number of coroutines to use in parallel when adding tasks. If specified and greater than 0, will start additional coroutines to submit requests and wait for them to finish. Otherwise will submit create_task_collection requests sequentially on main thread - :type concurrencies: int - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 - applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. 
Client libraries typically set this to the + :paramtype max_concurrency: int + :keyword service_timeout: The maximum time that the server can spend processing the request to + create the task collection, in seconds. The default is 30 seconds. If the value is larger than + 30, the default will be used instead.". Default value is None. + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :return: BatchTaskAddCollectionResult. The BatchTaskAddCollectionResult is compatible with MutableMapping :rtype: ~azure.batch.models.BatchTaskAddCollectionResult :raises ~azure.batch.custom.CreateTasksError: """ - kwargs.update({"timeout": timeout, "ocpdate": ocpdate}) + kwargs.update({"service_timeout": service_timeout, "ocp_date": ocp_date}) results_queue: Deque[_models.BatchTaskCreateResult] = collections.deque() task_workflow_manager = _TaskWorkflowManager(self, job_id=job_id, task_collection=task_collection, **kwargs) - if concurrencies: - if concurrencies < 0: - raise ValueError("Concurrencies must be positive or 0") + if max_concurrency: + if max_concurrency < 0: + raise ValueError("max_concurrency must be positive or 0") coroutines = [] - for _ in range(concurrencies): + for _ in range(max_concurrency): coroutines.append(task_workflow_manager.task_collection_handler(results_queue)) await asyncio.gather(*coroutines) else: @@ -1154,7 +1154,7 @@ async def create_tasks( task_workflow_manager.errors, ) submitted_tasks = _handle_output(results_queue) - return _models.BatchCreateTaskCollectionResult(values_property=submitted_tasks) + return _models.BatchCreateTaskCollectionResult(result_values=submitted_tasks) @distributed_trace async def get_node_file( self, pool_id: str, node_id: str, 
file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -1178,13 +1178,13 @@ async def get_node_file( :type node_id: str :param file_path: The path to the file or directory that you want to delete. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -1206,8 +1206,8 @@ async def get_node_file( args = [pool_id, node_id, file_path] kwargs.update( { - "timeout": timeout, - "ocpdate": ocpdate, + "service_timeout": service_timeout, + "ocp_date": ocp_date, "if_modified_since": if_modified_since, "if_unmodified_since": if_unmodified_since, "ocp_range": ocp_range, @@ -1223,8 +1223,8 @@ async def get_node_file_properties( node_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -1237,13 +1237,13 @@ async def get_node_file_properties( :type node_id: str :param file_path: The path to the file or directory that you want to delete. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. 
The operation will be performed only if the resource on the service has @@ -1270,22 +1270,12 @@ def cls(_pipeline_response, _json_response, headers): file_mode=headers["ocp-batch-file-mode"], ) - # cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - # url=headers["ocp-batch-file-url"], - # is_directory=headers["ocp-batch-file-isdirectory"], - # last_modified=headers["Last-Modified"], - # content_length=headers["Content-Length"], - # creation_time=headers["ocp-creation-time"], - # # content_type=headers["Content-Type"], # need to add to typespec - # file_mode=headers["ocp-batch-file-mode"], - # ) - get_response: _models.BatchFileProperties = await super()._get_node_file_properties_internal( # type: ignore pool_id, node_id, file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, cls=cls, @@ -1301,8 +1291,8 @@ async def get_task_file_properties( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, **kwargs: Any @@ -1315,13 +1305,13 @@ async def get_task_file_properties( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. 
Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1348,22 +1338,12 @@ def cls(_pipeline_response, _json_response, headers): file_mode=headers["ocp-batch-file-mode"], ) - # cls = lambda pipeline_response, json_response, headers: _models.BatchFileProperties( - # url=headers["ocp-batch-file-url"], - # is_directory=headers["ocp-batch-file-isdirectory"], - # last_modified=headers["Last-Modified"], - # content_length=headers["Content-Length"], - # creation_time=headers["ocp-creation-time"], - # # content_type=headers["Content-Type"], # need to add to typespec - # file_mode=headers["ocp-batch-file-mode"], - # ) - get_response: _models.BatchFileProperties = await super()._get_task_file_properties_internal( # type: ignore job_id, task_id, file_path, - timeout=timeout, - ocpdate=ocpdate, + service_timeout=service_timeout, + ocp_date=ocp_date, if_modified_since=if_modified_since, if_unmodified_since=if_unmodified_since, cls=cls, @@ -1379,8 +1359,8 @@ async def get_task_file( task_id: str, file_path: str, *, - timeout: Optional[int] = None, - ocpdate: Optional[datetime.datetime] = None, + service_timeout: Optional[int] = None, + ocp_date: Optional[datetime.datetime] = None, if_modified_since: Optional[datetime.datetime] = None, if_unmodified_since: Optional[datetime.datetime] = None, ocp_range: Optional[str] = None, @@ -1394,13 +1374,13 @@ async def get_task_file( :type task_id: str :param file_path: The path to the Task file that you want to get the content of. Required. :type file_path: str - :keyword timeout: The maximum number of items to return in the response. 
A maximum of 1000 + :keyword service_timeout: The maximum number of items to return in the response. A maximum of 1000 applications can be returned. Default value is None. - :paramtype timeout: int - :keyword ocpdate: The time the request was issued. Client libraries typically set this to the + :paramtype service_timeout: int + :keyword ocp_date: The time the request was issued. Client libraries typically set this to the current system clock time; set it explicitly if you are calling the REST API directly. Default value is None. - :paramtype ocpdate: ~datetime.datetime + :paramtype ocp_date: ~datetime.datetime :keyword if_modified_since: A timestamp indicating the last modified time of the resource known to the client. The operation will be performed only if the resource on the service has @@ -1423,8 +1403,8 @@ async def get_task_file( args = [job_id, task_id, file_path] kwargs.update( { - "timeout": timeout, - "ocpdate": ocpdate, + "service_timeout": service_timeout, + "ocp_date": ocp_date, "if_modified_since": if_modified_since, "if_unmodified_since": if_unmodified_since, "ocp_range": ocp_range, @@ -1489,7 +1469,7 @@ async def _bulk_add_tasks( create_task_collection_response: _models.BatchCreateTaskCollectionResult = ( await self._batch_client.create_task_collection( job_id=self._job_id, - task_collection=_models.BatchTaskGroup(values_property=chunk_tasks_to_add), + task_collection=_models.BatchTaskGroup(task_values=chunk_tasks_to_add), **self._kwargs, ) ) @@ -1543,8 +1523,8 @@ async def _bulk_add_tasks( # Unknown State - don't know if tasks failed to add or were successful self.errors.appendleft(e) else: - if create_task_collection_response.values_property: - for task_result in create_task_collection_response.values_property: + if create_task_collection_response.result_values: + for task_result in create_task_collection_response.result_values: if task_result.status == _models.BatchTaskAddStatus.SERVER_ERROR: # Server error will be retried for task in 
chunk_tasks_to_add: diff --git a/sdk/batch/azure-batch/azure/batch/models/__init__.py b/sdk/batch/azure-batch/azure/batch/models/__init__.py index 50d259c4d582..6f76bdf7d228 100644 --- a/sdk/batch/azure-batch/azure/batch/models/__init__.py +++ b/sdk/batch/azure-batch/azure/batch/models/__init__.py @@ -72,7 +72,7 @@ BatchNodeRemoteLoginSettings, BatchNodeRemoveOptions, BatchNodeUserCreateOptions, - BatchNodeUserUpdateOptions, + BatchNodeUserReplaceOptions, BatchNodeVMExtension, BatchOsDisk, BatchPool, @@ -286,7 +286,7 @@ "BatchNodeRemoteLoginSettings", "BatchNodeRemoveOptions", "BatchNodeUserCreateOptions", - "BatchNodeUserUpdateOptions", + "BatchNodeUserReplaceOptions", "BatchNodeVMExtension", "BatchOsDisk", "BatchPool", diff --git a/sdk/batch/azure-batch/azure/batch/models/_enums.py b/sdk/batch/azure-batch/azure/batch/models/_enums.py index f9294186013f..cfc41ac8fe71 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_enums.py +++ b/sdk/batch/azure-batch/azure/batch/models/_enums.py @@ -674,11 +674,11 @@ class StatusLevelTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Level code.""" ERROR = "Error" - """Error""" + """Error.""" INFO = "Info" - """Info""" + """Info.""" WARNING = "Warning" - """Warning""" + """Warning.""" class StorageAccountType(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/batch/azure-batch/azure/batch/models/_models.py b/sdk/batch/azure-batch/azure/batch/models/_models.py index fc08c261b83f..d0d2acf13815 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_models.py +++ b/sdk/batch/azure-batch/azure/batch/models/_models.py @@ -18,8 +18,8 @@ class AuthenticationTokenSettings(_Model): - """The settings for an authentication token that the Task can use to perform Batch - service operations. + """The settings for an authentication token that the Task can use to perform Batch service + operations. :ivar access: The Batch resources to which the token grants access. 
The authentication token grants access to a limited set of Batch service operations. Currently the only supported value @@ -63,7 +63,7 @@ class AutomaticOsUpgradePolicy(_Model): applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ + `_ cannot be set to true. :vartype enable_automatic_os_upgrade: bool :ivar use_rolling_upgrade_policy: Indicates whether rolling upgrade policy should be used @@ -84,7 +84,7 @@ class AutomaticOsUpgradePolicy(_Model): """Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.

If this is set to true for Windows based pools, `WindowsConfiguration.enableAutomaticUpdates - `_ + `_ cannot be set to true.""" use_rolling_upgrade_policy: Optional[bool] = rest_field( name="useRollingUpgradePolicy", visibility=["read", "create", "update", "delete", "query"] @@ -174,8 +174,8 @@ class AutoScaleRunError(_Model): :ivar message: A message describing the autoscale error, intended to be suitable for display in a user interface. :vartype message: str - :ivar values_property: A list of additional error details related to the autoscale error. - :vartype values_property: list[~azure.batch.models.NameValuePair] + :ivar error_values: A list of additional error details related to the autoscale error. + :vartype error_values: list[~azure.batch.models.NameValuePair] """ code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -184,8 +184,8 @@ class AutoScaleRunError(_Model): message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A message describing the autoscale error, intended to be suitable for display in a user interface.""" - values_property: Optional[list["_models.NameValuePair"]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" + error_values: Optional[list["_models.NameValuePair"]] = rest_field( + name="values", visibility=["read", "create", "update", "delete", "query"] ) """A list of additional error details related to the autoscale error.""" @@ -195,7 +195,7 @@ def __init__( *, code: Optional[str] = None, message: Optional[str] = None, - values_property: Optional[list["_models.NameValuePair"]] = None, + error_values: Optional[list["_models.NameValuePair"]] = None, ) -> None: ... 
@overload @@ -393,8 +393,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchAffinityInfo(_Model): - """A locality hint that can be used by the Batch service to select a Compute Node - on which to start a Task. + """A locality hint that can be used by the Batch service to select a Compute Node on which to + start a Task. :ivar affinity_id: An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to @@ -514,8 +514,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchAutoPoolSpecification(_Model): - """Specifies characteristics for a temporary 'auto pool'. The Batch service will - create this auto Pool when the Job is submitted. + """Specifies characteristics for a temporary 'auto pool'. The Batch service will create this auto + Pool when the Job is submitted. :ivar auto_pool_id_prefix: A prefix to be added to the unique identifier when a Pool is automatically created. The Batch service assigns each auto Pool a unique identifier on @@ -635,12 +635,12 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchCreateTaskCollectionResult(_Model): """The result of creating a collection of Tasks to a Job. - :ivar values_property: The results of the create Task collection operation. - :vartype values_property: list[~azure.batch.models.BatchTaskCreateResult] + :ivar result_values: The results of the create Task collection operation. 
+ :vartype result_values: list[~azure.batch.models.BatchTaskCreateResult] """ - values_property: Optional[list["_models.BatchTaskCreateResult"]] = rest_field( - name="value", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" + result_values: Optional[list["_models.BatchTaskCreateResult"]] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] ) """The results of the create Task collection operation.""" @@ -648,7 +648,7 @@ class BatchCreateTaskCollectionResult(_Model): def __init__( self, *, - values_property: Optional[list["_models.BatchTaskCreateResult"]] = None, + result_values: Optional[list["_models.BatchTaskCreateResult"]] = None, ) -> None: ... @overload @@ -663,8 +663,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchDiffDiskSettings(_Model): - """Specifies the ephemeral Disk Settings for the operating system disk used by the - compute node (VM). + """Specifies the ephemeral Disk Settings for the operating system disk used by the compute node + (VM). :ivar placement: Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., @@ -720,9 +720,9 @@ class BatchError(_Model): :ivar message: A message describing the error, intended to be suitable for display in a user interface. :vartype message: ~azure.batch.models.BatchErrorMessage - :ivar values_property: A collection of key-value pairs containing additional details about the + :ivar error_values: A collection of key-value pairs containing additional details about the error. 
- :vartype values_property: list[~azure.batch.models.BatchErrorDetail] + :vartype error_values: list[~azure.batch.models.BatchErrorDetail] """ code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -732,8 +732,8 @@ class BatchError(_Model): visibility=["read", "create", "update", "delete", "query"] ) """A message describing the error, intended to be suitable for display in a user interface.""" - values_property: Optional[list["_models.BatchErrorDetail"]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" + error_values: Optional[list["_models.BatchErrorDetail"]] = rest_field( + name="values", visibility=["read", "create", "update", "delete", "query"] ) """A collection of key-value pairs containing additional details about the error.""" @@ -743,7 +743,7 @@ def __init__( *, code: Optional[str] = None, message: Optional["_models.BatchErrorMessage"] = None, - values_property: Optional[list["_models.BatchErrorDetail"]] = None, + error_values: Optional[list["_models.BatchErrorDetail"]] = None, ) -> None: ... @overload @@ -824,8 +824,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchInboundNatPool(_Model): - """A inbound NAT Pool that can be used to address specific ports on Compute Nodes - in a Batch Pool externally. + """A inbound NAT Pool that can be used to address specific ports on Compute Nodes in a Batch Pool + externally. :ivar name: The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, @@ -937,11 +937,11 @@ class BatchJob(_Model): :vartype uses_task_dependencies: bool :ivar url: The URL of the Job. Required. :vartype url: str - :ivar e_tag: The ETag of the Job. This is an opaque string. You can use it to detect whether - the Job has changed between requests. 
In particular, you can be pass the ETag when updating a - Job to specify that your changes should take effect only if nobody else has modified the Job in - the meantime. Required. - :vartype e_tag: str + :ivar etag: The ETag of the Job. This is an opaque string. You can use it to detect whether the + Job has changed between requests. In particular, you can be pass the ETag when updating a Job + to specify that your changes should take effect only if nobody else has modified the Job in the + meantime. Required. + :vartype etag: str :ivar last_modified: The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. Required. @@ -1028,7 +1028,7 @@ class BatchJob(_Model): """Whether Tasks in the Job can define dependencies on each other. The default is false.""" url: str = rest_field(visibility=["read"]) """The URL of the Job. Required.""" - e_tag: str = rest_field(name="eTag", visibility=["read"]) + etag: str = rest_field(name="eTag", visibility=["read"]) """The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the @@ -1550,29 +1550,24 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobManagerTask(_Model): - """Specifies details of a Job Manager Task. - The Job Manager Task is automatically started when the Job is created. The - Batch service tries to schedule the Job Manager Task before any other Tasks in - the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where - Job Manager Tasks are running for as long as possible (that is, Compute Nodes - running 'normal' Tasks are removed before Compute Nodes running Job Manager - Tasks). 
When a Job Manager Task fails and needs to be restarted, the system - tries to schedule it at the highest priority. If there are no idle Compute - Nodes available, the system may terminate one of the running Tasks in the Pool - and return it to the queue in order to make room for the Job Manager Task to - restart. Note that a Job Manager Task in one Job does not have priority over - Tasks in other Jobs. Across Jobs, only Job level priorities are observed. For - example, if a Job Manager in a priority 0 Job needs to be restarted, it will - not displace Tasks of a priority 1 Job. Batch will retry Tasks when a recovery - operation is triggered on a Node. Examples of recovery operations include (but - are not limited to) when an unhealthy Node is rebooted or a Compute Node - disappeared due to host failure. Retries due to recovery operations are - independent of and are not counted against the maxTaskRetryCount. Even if the - maxTaskRetryCount is 0, an internal retry due to a recovery operation may - occur. Because of this, all Tasks should be idempotent. This means Tasks need - to tolerate being interrupted and restarted without causing any corruption or - duplicate data. The best practice for long running Tasks is to use some form of - checkpointing. + """Specifies details of a Job Manager Task. The Job Manager Task is automatically started when the + Job is created. The Batch service tries to schedule the Job Manager Task before any other Tasks + in the Job. When shrinking a Pool, the Batch service tries to preserve Nodes where Job Manager + Tasks are running for as long as possible (that is, Compute Nodes running 'normal' Tasks are + removed before Compute Nodes running Job Manager Tasks). When a Job Manager Task fails and + needs to be restarted, the system tries to schedule it at the highest priority. 
If there are no + idle Compute Nodes available, the system may terminate one of the running Tasks in the Pool and + return it to the queue in order to make room for the Job Manager Task to restart. Note that a + Job Manager Task in one Job does not have priority over Tasks in other Jobs. Across Jobs, only + Job level priorities are observed. For example, if a Job Manager in a priority 0 Job needs to + be restarted, it will not displace Tasks of a priority 1 Job. Batch will retry Tasks when a + recovery operation is triggered on a Node. Examples of recovery operations include (but are not + limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to host + failure. Retries due to recovery operations are independent of and are not counted against the + maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to a recovery + operation may occur. Because of this, all Tasks should be idempotent. This means Tasks need to + tolerate being interrupted and restarted without causing any corruption or duplicate data. The + best practice for long running Tasks is to use some form of checkpointing. :ivar id: A string that uniquely identifies the Job Manager Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot @@ -1643,14 +1638,12 @@ class BatchJobManagerTask(_Model): is true. :vartype run_exclusive: bool :ivar application_package_references: A list of Application Packages that the Batch service - will deploy to the - Compute Node before running the command line.Application Packages are - downloaded and deployed to a shared directory, not the Task working - directory. Therefore, if a referenced Application Package is already - on the Compute Node, and is up to date, then it is not re-downloaded; - the existing copy on the Compute Node is used. 
If a referenced Application - Package cannot be installed, for example because the package has been deleted - or because download failed, the Task fails. + will deploy to the Compute Node before running the command line.Application Packages are + downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a + referenced Application Package is already on the Compute Node, and is up to date, then it is + not re-downloaded; the existing copy on the Compute Node is used. If a referenced Application + Package cannot be installed, for example because the package has been deleted or because + download failed, the Task fails. :vartype application_package_references: list[~azure.batch.models.BatchApplicationPackageReference] :ivar authentication_token_settings: The settings for an authentication token that the Task can @@ -1755,14 +1748,12 @@ class BatchJobManagerTask(_Model): application_package_references: Optional[list["_models.BatchApplicationPackageReference"]] = rest_field( name="applicationPackageReferences", visibility=["read", "create", "update", "delete", "query"] ) - """A list of Application Packages that the Batch service will deploy to the - Compute Node before running the command line.Application Packages are - downloaded and deployed to a shared directory, not the Task working - directory. Therefore, if a referenced Application Package is already - on the Compute Node, and is up to date, then it is not re-downloaded; - the existing copy on the Compute Node is used. If a referenced Application - Package cannot be installed, for example because the package has been deleted - or because download failed, the Task fails.""" + """A list of Application Packages that the Batch service will deploy to the Compute Node before + running the command line.Application Packages are downloaded and deployed to a shared + directory, not the Task working directory. 
Therefore, if a referenced Application Package is + already on the Compute Node, and is up to date, then it is not re-downloaded; the existing copy + on the Compute Node is used. If a referenced Application Package cannot be installed, for + example because the package has been deleted or because download failed, the Task fails.""" authentication_token_settings: Optional["_models.AuthenticationTokenSettings"] = rest_field( name="authenticationTokenSettings", visibility=["read", "create", "update", "delete", "query"] ) @@ -1947,32 +1938,27 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobPreparationTask(_Model): - """A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. - You can use Job Preparation to prepare a Node to run Tasks for the Job. - Activities commonly performed in Job Preparation include: Downloading common - resource files used by all the Tasks in the Job. The Job Preparation Task can - download these common resource files to the shared location on the Node. - (AZ_BATCH_NODE_ROOT_DIR\\shared), or starting a local service on the Node so - that all Tasks of that Job can communicate with it. If the Job Preparation Task - fails (that is, exhausts its retry count before exiting with exit code 0), - Batch will not run Tasks of this Job on the Node. The Compute Node remains - ineligible to run Tasks of this Job until it is reimaged. The Compute Node - remains active and can be used for other Jobs. The Job Preparation Task can run - multiple times on the same Node. Therefore, you should write the Job - Preparation Task to handle re-execution. If the Node is rebooted, the Job - Preparation Task is run again on the Compute Node before scheduling any other - Task of the Job, if rerunOnNodeRebootAfterSuccess is true or if the Job - Preparation Task did not previously complete. If the Node is reimaged, the Job - Preparation Task is run again before scheduling any Task of the Job. 
Batch will - retry Tasks when a recovery operation is triggered on a Node. Examples of - recovery operations include (but are not limited to) when an unhealthy Node is - rebooted or a Compute Node disappeared due to host failure. Retries due to - recovery operations are independent of and are not counted against the - maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to - a recovery operation may occur. Because of this, all Tasks should be - idempotent. This means Tasks need to tolerate being interrupted and restarted - without causing any corruption or duplicate data. The best practice for long - running Tasks is to use some form of checkpointing. + """A Job Preparation Task to run before any Tasks of the Job on any given Compute Node. You can + use Job Preparation to prepare a Node to run Tasks for the Job. Activities commonly performed + in Job Preparation include: Downloading common resource files used by all the Tasks in the Job. + The Job Preparation Task can download these common resource files to the shared location on the + Node. (AZ_BATCH_NODE_ROOT_DIR\\shared), or starting a local service on the Node so that all + Tasks of that Job can communicate with it. If the Job Preparation Task fails (that is, exhausts + its retry count before exiting with exit code 0), Batch will not run Tasks of this Job on the + Node. The Compute Node remains ineligible to run Tasks of this Job until it is reimaged. The + Compute Node remains active and can be used for other Jobs. The Job Preparation Task can run + multiple times on the same Node. Therefore, you should write the Job Preparation Task to handle + re-execution. If the Node is rebooted, the Job Preparation Task is run again on the Compute + Node before scheduling any other Task of the Job, if rerunOnNodeRebootAfterSuccess is true or + if the Job Preparation Task did not previously complete. 
If the Node is reimaged, the Job + Preparation Task is run again before scheduling any Task of the Job. Batch will retry Tasks + when a recovery operation is triggered on a Node. Examples of recovery operations include (but + are not limited to) when an unhealthy Node is rebooted or a Compute Node disappeared due to + host failure. Retries due to recovery operations are independent of and are not counted against + the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal retry due to a recovery + operation may occur. Because of this, all Tasks should be idempotent. This means Tasks need to + tolerate being interrupted and restarted without causing any corruption or duplicate data. The + best practice for long running Tasks is to use some form of checkpointing. :ivar id: A string that uniquely identifies the Job Preparation Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot @@ -2137,8 +2123,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobPreparationTaskExecutionInfo(_Model): - """Contains information about the execution of a Job Preparation Task on a Compute - Node. + """Contains information about the execution of a Job Preparation Task on a Compute Node. :ivar start_time: The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. Required. @@ -2279,22 +2264,19 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobReleaseTask(_Model): - """A Job Release Task to run on Job completion on any Compute Node where the Job has run. 
- The Job Release Task runs when the Job ends, because of one of the following: - The user calls the Terminate Job API, or the Delete Job API while the Job is - still active, the Job's maximum wall clock time constraint is reached, and the - Job is still active, or the Job's Job Manager Task completed, and the Job is - configured to terminate when the Job Manager completes. The Job Release Task - runs on each Node where Tasks of the Job have run and the Job Preparation Task - ran and completed. If you reimage a Node after it has run the Job Preparation - Task, and the Job ends without any further Tasks of the Job running on that - Node (and hence the Job Preparation Task does not re-run), then the Job Release - Task does not run on that Compute Node. If a Node reboots while the Job Release - Task is still running, the Job Release Task runs again when the Compute Node - starts up. The Job is not marked as complete until all Job Release Tasks have - completed. The Job Release Task runs in the background. It does not occupy a - scheduling slot; that is, it does not count towards the taskSlotsPerNode limit - specified on the Pool. + """A Job Release Task to run on Job completion on any Compute Node where the Job has run. The Job + Release Task runs when the Job ends, because of one of the following: The user calls the + Terminate Job API, or the Delete Job API while the Job is still active, the Job's maximum wall + clock time constraint is reached, and the Job is still active, or the Job's Job Manager Task + completed, and the Job is configured to terminate when the Job Manager completes. The Job + Release Task runs on each Node where Tasks of the Job have run and the Job Preparation Task ran + and completed. If you reimage a Node after it has run the Job Preparation Task, and the Job + ends without any further Tasks of the Job running on that Node (and hence the Job Preparation + Task does not re-run), then the Job Release Task does not run on that Compute Node. 
If a Node + reboots while the Job Release Task is still running, the Job Release Task runs again when the + Compute Node starts up. The Job is not marked as complete until all Job Release Tasks have + completed. The Job Release Task runs in the background. It does not occupy a scheduling slot; + that is, it does not count towards the taskSlotsPerNode limit specified on the Pool. :ivar id: A string that uniquely identifies the Job Release Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores and cannot @@ -2434,8 +2416,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobReleaseTaskExecutionInfo(_Model): - """Contains information about the execution of a Job Release Task on a Compute - Node. + """Contains information about the execution of a Job Release Task on a Compute Node. :ivar start_time: The time at which the Task started running. If the Task has been restarted or retried, this is the most recent time at which the Task started running. Required. @@ -2545,8 +2526,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobSchedule(_Model): - """A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a - specification used to create each Job. + """A Job Schedule that allows recurring Jobs by specifying when to run Jobs and a specification + used to create each Job. :ivar id: A string that uniquely identifies the schedule within the Account. Required. :vartype id: str @@ -2554,11 +2535,11 @@ class BatchJobSchedule(_Model): :vartype display_name: str :ivar url: The URL of the Job Schedule. Required. :vartype url: str - :ivar e_tag: The ETag of the Job Schedule. This is an opaque string. You can use it to detect + :ivar etag: The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. 
In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. Required. - :vartype e_tag: str + :vartype etag: str :ivar last_modified: The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. @@ -2602,7 +2583,7 @@ class BatchJobSchedule(_Model): """The display name for the schedule.""" url: str = rest_field(visibility=["read"]) """The URL of the Job Schedule. Required.""" - e_tag: str = rest_field(name="eTag", visibility=["read"]) + etag: str = rest_field(name="eTag", visibility=["read"]) """The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else @@ -2675,8 +2656,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobScheduleConfiguration(_Model): - """The schedule according to which Jobs will be created. All times are fixed - respective to UTC and are not impacted by daylight saving time. + """The schedule according to which Jobs will be created. All times are fixed respective to UTC and + are not impacted by daylight saving time. :ivar do_not_run_until: The earliest time at which any Job may be created under this Job Schedule. If you do not specify a doNotRunUntil time, the schedule becomes ready to create Jobs @@ -2847,8 +2828,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchJobScheduleExecutionInfo(_Model): - """Contains information about Jobs that have been and will be run under a Job - Schedule. 
+ """Contains information about Jobs that have been and will be run under a Job Schedule. :ivar next_run_time: The next time at which a Job will be created under this schedule. This property is meaningful only if the schedule is in the active state when the time comes around. @@ -3674,8 +3654,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchMetadataItem(_Model): - """The Batch service does not assign any meaning to this metadata; it is solely - for the use of user code. + """The Batch service does not assign any meaning to this metadata; it is solely for the use of + user code. :ivar name: The name of the metadata item. Required. :vartype name: str @@ -3879,8 +3859,8 @@ class BatchNode(_Model): class BatchNodeAgentInfo(_Model): - """The Batch Compute Node agent is a program that runs on each Compute Node in the - Pool and provides Batch capability on the Compute Node. + """The Batch Compute Node agent is a program that runs on each Compute Node in the Pool and + provides Batch capability on the Compute Node. :ivar version: The version of the Batch Compute Node agent running on the Compute Node. This version number can be checked against the Compute Node agent release notes located at @@ -4227,8 +4207,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchNodeIdentityReference(_Model): - """The reference to a user assigned identity associated with the Batch pool which - a compute node will use. + """The reference to a user assigned identity associated with the Batch pool which a compute node + will use. :ivar resource_id: The ARM resource id of the user assigned identity. :vartype resource_id: str @@ -4319,9 +4299,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchNodePlacementConfiguration(_Model): - """For regional placement, nodes in the pool will be allocated in the same region. - For zonal placement, nodes in the pool will be spread across different zones - with best effort balancing. 
+ """For regional placement, nodes in the pool will be allocated in the same region. For zonal + placement, nodes in the pool will be spread across different zones with best effort balancing. :ivar policy: Node placement Policy type on Batch Pools. Allocation policy used by Batch Service to provision the nodes. If not specified, Batch will use the regional policy. Known @@ -4594,7 +4573,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class BatchNodeUserUpdateOptions(_Model): +class BatchNodeUserReplaceOptions(_Model): """Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. :ivar password: The password of the Account. The password is required for Windows Compute @@ -4773,11 +4752,11 @@ class BatchPool(_Model): :vartype display_name: str :ivar url: The URL of the Pool. Required. :vartype url: str - :ivar e_tag: The ETag of the Pool. This is an opaque string. You can use it to detect whether + :ivar etag: The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. Required. - :vartype e_tag: str + :vartype etag: str :ivar last_modified: The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. Required. @@ -4894,7 +4873,7 @@ class BatchPool(_Model): length of 1024.""" url: str = rest_field(visibility=["read"]) """The URL of the Pool. Required.""" - e_tag: str = rest_field(name="eTag", visibility=["read"]) + etag: str = rest_field(name="eTag", visibility=["read"]) """The ETag of the Pool. This is an opaque string. 
You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the @@ -5747,36 +5726,36 @@ class BatchPoolResourceStatistics(_Model): :ivar avg_cpu_percentage: The average CPU usage across all Compute Nodes in the Pool (percentage per node). Required. :vartype avg_cpu_percentage: float - :ivar avg_memory_gi_b: The average memory usage in GiB across all Compute Nodes in the Pool. + :ivar avg_memory_gib: The average memory usage in GiB across all Compute Nodes in the Pool. Required. - :vartype avg_memory_gi_b: float - :ivar peak_memory_gi_b: The peak memory usage in GiB across all Compute Nodes in the Pool. + :vartype avg_memory_gib: float + :ivar peak_memory_gib: The peak memory usage in GiB across all Compute Nodes in the Pool. Required. - :vartype peak_memory_gi_b: float - :ivar avg_disk_gi_b: The average used disk space in GiB across all Compute Nodes in the Pool. + :vartype peak_memory_gib: float + :ivar avg_disk_gib: The average used disk space in GiB across all Compute Nodes in the Pool. Required. - :vartype avg_disk_gi_b: float - :ivar peak_disk_gi_b: The peak used disk space in GiB across all Compute Nodes in the Pool. + :vartype avg_disk_gib: float + :ivar peak_disk_gib: The peak used disk space in GiB across all Compute Nodes in the Pool. Required. - :vartype peak_disk_gi_b: float + :vartype peak_disk_gib: float :ivar disk_read_iops: The total number of disk read operations across all Compute Nodes in the Pool. Required. :vartype disk_read_iops: int :ivar disk_write_iops: The total number of disk write operations across all Compute Nodes in the Pool. Required. :vartype disk_write_iops: int - :ivar disk_read_gi_b: The total amount of data in GiB of disk reads across all Compute Nodes in + :ivar disk_read_gib: The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. 
Required. - :vartype disk_read_gi_b: float - :ivar disk_write_gi_b: The total amount of data in GiB of disk writes across all Compute Nodes + :vartype disk_read_gib: float + :ivar disk_write_gib: The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. Required. - :vartype disk_write_gi_b: float - :ivar network_read_gi_b: The total amount of data in GiB of network reads across all Compute + :vartype disk_write_gib: float + :ivar network_read_gib: The total amount of data in GiB of network reads across all Compute Nodes in the Pool. Required. - :vartype network_read_gi_b: float - :ivar network_write_gi_b: The total amount of data in GiB of network writes across all Compute + :vartype network_read_gib: float + :ivar network_write_gib: The total amount of data in GiB of network writes across all Compute Nodes in the Pool. Required. - :vartype network_write_gi_b: float + :vartype network_write_gib: float """ start_time: datetime.datetime = rest_field( @@ -5792,15 +5771,15 @@ class BatchPoolResourceStatistics(_Model): name="avgCPUPercentage", visibility=["read", "create", "update", "delete", "query"] ) """The average CPU usage across all Compute Nodes in the Pool (percentage per node). Required.""" - avg_memory_gi_b: float = rest_field(name="avgMemoryGiB", visibility=["read", "create", "update", "delete", "query"]) + avg_memory_gib: float = rest_field(name="avgMemoryGiB", visibility=["read", "create", "update", "delete", "query"]) """The average memory usage in GiB across all Compute Nodes in the Pool. Required.""" - peak_memory_gi_b: float = rest_field( + peak_memory_gib: float = rest_field( name="peakMemoryGiB", visibility=["read", "create", "update", "delete", "query"] ) """The peak memory usage in GiB across all Compute Nodes in the Pool. 
Required.""" - avg_disk_gi_b: float = rest_field(name="avgDiskGiB", visibility=["read", "create", "update", "delete", "query"]) + avg_disk_gib: float = rest_field(name="avgDiskGiB", visibility=["read", "create", "update", "delete", "query"]) """The average used disk space in GiB across all Compute Nodes in the Pool. Required.""" - peak_disk_gi_b: float = rest_field(name="peakDiskGiB", visibility=["read", "create", "update", "delete", "query"]) + peak_disk_gib: float = rest_field(name="peakDiskGiB", visibility=["read", "create", "update", "delete", "query"]) """The peak used disk space in GiB across all Compute Nodes in the Pool. Required.""" disk_read_iops: int = rest_field( name="diskReadIOps", visibility=["read", "create", "update", "delete", "query"], format="str" @@ -5810,16 +5789,16 @@ class BatchPoolResourceStatistics(_Model): name="diskWriteIOps", visibility=["read", "create", "update", "delete", "query"], format="str" ) """The total number of disk write operations across all Compute Nodes in the Pool. Required.""" - disk_read_gi_b: float = rest_field(name="diskReadGiB", visibility=["read", "create", "update", "delete", "query"]) + disk_read_gib: float = rest_field(name="diskReadGiB", visibility=["read", "create", "update", "delete", "query"]) """The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. Required.""" - disk_write_gi_b: float = rest_field(name="diskWriteGiB", visibility=["read", "create", "update", "delete", "query"]) + disk_write_gib: float = rest_field(name="diskWriteGiB", visibility=["read", "create", "update", "delete", "query"]) """The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. Required.""" - network_read_gi_b: float = rest_field( + network_read_gib: float = rest_field( name="networkReadGiB", visibility=["read", "create", "update", "delete", "query"] ) """The total amount of data in GiB of network reads across all Compute Nodes in the Pool. 
Required.""" - network_write_gi_b: float = rest_field( + network_write_gib: float = rest_field( name="networkWriteGiB", visibility=["read", "create", "update", "delete", "query"] ) """The total amount of data in GiB of network writes across all Compute Nodes in the Pool. @@ -5832,16 +5811,16 @@ def __init__( start_time: datetime.datetime, last_update_time: datetime.datetime, avg_cpu_percentage: float, - avg_memory_gi_b: float, - peak_memory_gi_b: float, - avg_disk_gi_b: float, - peak_disk_gi_b: float, + avg_memory_gib: float, + peak_memory_gib: float, + avg_disk_gib: float, + peak_disk_gib: float, disk_read_iops: int, disk_write_iops: int, - disk_read_gi_b: float, - disk_write_gi_b: float, - network_read_gi_b: float, - network_write_gi_b: float, + disk_read_gib: float, + disk_write_gib: float, + network_read_gib: float, + network_write_gib: float, ) -> None: ... @overload @@ -6501,19 +6480,17 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchStartTask(_Model): - """Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host failure. - Retries due to recovery operations are independent of and are not counted - against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - retry due to a recovery operation may occur. Because of this, all Tasks should - be idempotent. This means Tasks need to tolerate being interrupted and - restarted without causing any corruption or duplicate data. The best practice - for long running Tasks is to use some form of checkpointing. In some cases the - StartTask may be re-run even though the Compute Node was not rebooted. Special - care should be taken to avoid StartTasks which create breakaway process or - install/launch services from the StartTask working directory, as this will - block Batch from being able to re-run the StartTask. 
+ """Batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery + operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute + Node disappeared due to host failure. Retries due to recovery operations are independent of and + are not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + retry due to a recovery operation may occur. Because of this, all Tasks should be idempotent. + This means Tasks need to tolerate being interrupted and restarted without causing any + corruption or duplicate data. The best practice for long running Tasks is to use some form of + checkpointing. In some cases the StartTask may be re-run even though the Compute Node was not + rebooted. Special care should be taken to avoid StartTasks which create breakaway process or + install/launch services from the StartTask working directory, as this will block Batch from + being able to re-run the StartTask. :ivar command_line: The command line of the StartTask. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable @@ -6909,8 +6886,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchSupportedImage(_Model): - """A reference to the Azure Virtual Machines Marketplace Image and additional - information about the Image. + """A reference to the Azure Virtual Machines Marketplace Image and additional information about + the Image. :ivar node_agent_sku_id: The ID of the Compute Node agent SKU which the Image supports. Required. @@ -6985,15 +6962,14 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchTask(_Model): - """Batch will retry Tasks when a recovery operation is triggered on a Node. - Examples of recovery operations include (but are not limited to) when an - unhealthy Node is rebooted or a Compute Node disappeared due to host failure. 
- Retries due to recovery operations are independent of and are not counted - against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal - retry due to a recovery operation may occur. Because of this, all Tasks should - be idempotent. This means Tasks need to tolerate being interrupted and - restarted without causing any corruption or duplicate data. The best practice - for long running Tasks is to use some form of checkpointing. + """Batch will retry Tasks when a recovery operation is triggered on a Node. Examples of recovery + operations include (but are not limited to) when an unhealthy Node is rebooted or a Compute + Node disappeared due to host failure. Retries due to recovery operations are independent of and + are not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is 0, an internal + retry due to a recovery operation may occur. Because of this, all Tasks should be idempotent. + This means Tasks need to tolerate being interrupted and restarted without causing any + corruption or duplicate data. The best practice for long running Tasks is to use some form of + checkpointing. :ivar id: A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain @@ -7004,11 +6980,11 @@ class BatchTask(_Model): :vartype display_name: str :ivar url: The URL of the Task. Required. :vartype url: str - :ivar e_tag: The ETag of the Task. This is an opaque string. You can use it to detect whether + :ivar etag: The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. Required. - :vartype e_tag: str + :vartype etag: str :ivar last_modified: The last modified time of the Task. Required. 
:vartype last_modified: ~datetime.datetime :ivar creation_time: The creation time of the Task. Required. @@ -7116,7 +7092,7 @@ class BatchTask(_Model): characters up to a maximum length of 1024.""" url: str = rest_field(visibility=["read"]) """The URL of the Task. Required.""" - e_tag: str = rest_field(name="eTag", visibility=["read"]) + etag: str = rest_field(name="eTag", visibility=["read"]) """The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the @@ -7771,11 +7747,11 @@ class BatchTaskCreateResult(_Model): :vartype status: str or ~azure.batch.models.BatchTaskAddStatus :ivar task_id: The ID of the Task for which this is the result. Required. :vartype task_id: str - :ivar e_tag: The ETag of the Task, if the Task was successfully added. You can use this to + :ivar etag: The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can be pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. - :vartype e_tag: str + :vartype etag: str :ivar last_modified: The last modified time of the Task. :vartype last_modified: ~datetime.datetime :ivar location: The URL of the Task, if the Task was successfully added. @@ -7791,7 +7767,7 @@ class BatchTaskCreateResult(_Model): and \"servererror\".""" task_id: str = rest_field(name="taskId", visibility=["read", "create", "update", "delete", "query"]) """The ID of the Task for which this is the result. 
Required."""
- e_tag: Optional[str] = rest_field(name="eTag", visibility=["read", "create", "update", "delete", "query"])
+ etag: Optional[str] = rest_field(name="eTag", visibility=["read", "create", "update", "delete", "query"])
 """The ETag of the Task, if the Task was successfully added. You can use this to detect whether
 the Task has changed between requests. In particular, you can be pass the ETag with an Update
 Task request to specify that your changes should take effect only if nobody else has modified
@@ -7811,7 +7787,7 @@ def __init__(
 *,
 status: Union[str, "_models.BatchTaskAddStatus"],
 task_id: str,
- e_tag: Optional[str] = None,
+ etag: Optional[str] = None,
 last_modified: Optional[datetime.datetime] = None,
 location: Optional[str] = None,
 error: Optional["_models.BatchError"] = None,
@@ -7829,9 +7805,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
 class BatchTaskDependencies(_Model):
- """Specifies any dependencies of a Task. Any Task that is explicitly specified or
- within a dependency range must complete before the dependant Task will be
- scheduled.
+ """Specifies any dependencies of a Task. Any Task that is explicitly specified or within a
+ dependency range must complete before the dependent Task will be scheduled.
 :ivar task_ids: The list of Task IDs that this Task depends on. All Tasks in this list must
 complete successfully before the dependent Task can be scheduled. The taskIds collection is
@@ -8071,15 +8046,15 @@ def __init__(self, *args: Any, **kwargs: Any) -> None:
 class BatchTaskGroup(_Model):
 """A collection of Azure Batch Tasks to add.
- :ivar values_property: The collection of Tasks to add. The maximum count of Tasks is 100. The
- total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for
+ :ivar task_values: The collection of Tasks to add. The maximum count of Tasks is 100. The total
+ serialized size of this collection must be less than 1MB. 
If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. Required. - :vartype values_property: list[~azure.batch.models.BatchTaskCreateOptions] + :vartype task_values: list[~azure.batch.models.BatchTaskCreateOptions] """ - values_property: list["_models.BatchTaskCreateOptions"] = rest_field( - name="value", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" + task_values: list["_models.BatchTaskCreateOptions"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] ) """The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has @@ -8090,7 +8065,7 @@ class BatchTaskGroup(_Model): def __init__( self, *, - values_property: list["_models.BatchTaskCreateOptions"], + task_values: list["_models.BatchTaskCreateOptions"], ) -> None: ... @overload @@ -8105,8 +8080,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchTaskIdRange(_Model): - """The start and end of the range are inclusive. For example, if a range has start - 9 and end 12, then it represents Tasks '9', '10', '11' and '12'. + """The start and end of the range are inclusive. For example, if a range has start 9 and end 12, + then it represents Tasks '9', '10', '11' and '12'. :ivar start: The first Task ID in the range. Required. :vartype start: int @@ -8512,9 +8487,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class BatchVmImageReference(_Model): - """A reference to an Azure Virtual Machines Marketplace Image or a Azure Compute Gallery Image. - To get the list of all Azure Marketplace Image references verified by Azure Batch, see the - ' List Supported Images ' operation. 
+ """A reference to an Azure Virtual Machines Marketplace Image or a Azure Compute Gallery Image. To + get the list of all Azure Marketplace Image references verified by Azure Batch, see the ' List + Supported Images ' operation. :ivar publisher: The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. @@ -8774,9 +8749,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class DataDisk(_Model): - """Settings which will be used by the data disks associated to Compute Nodes in - the Pool. When using attached data disks, you need to mount and format the - disks from within a VM to use them. + """Settings which will be used by the data disks associated to Compute Nodes in the Pool. When + using attached data disks, you need to mount and format the disks from within a VM to use them. :ivar logical_unit_number: The logical unit number. The logicalUnitNumber is used to uniquely identify each data disk. If attaching multiple disks, each should have a distinct @@ -8883,9 +8857,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class DiskEncryptionConfiguration(_Model): - """The disk encryption configuration applied on compute nodes in the pool. - Disk encryption configuration is not supported on Linux pool created with - Azure Compute Gallery Image. + """The disk encryption configuration applied on compute nodes in the pool. Disk encryption + configuration is not supported on Linux pool created with Azure Compute Gallery Image. :ivar customer_managed_key: The Customer Managed Key reference to encrypt the OS Disk. Customer Managed Key will encrypt OS Disk by EncryptionAtRest, and by default we will encrypt the data @@ -8993,8 +8966,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ExitCodeMapping(_Model): - """How the Batch service should respond if a Task exits with a particular exit - code. + """How the Batch service should respond if a Task exits with a particular exit code. 
:ivar code: A process exit code. Required. :vartype code: int @@ -9030,8 +9002,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ExitCodeRangeMapping(_Model): - """A range of exit codes and how the Batch service should respond to exit codes - within that range. + """A range of exit codes and how the Batch service should respond to exit codes within that range. :ivar start: The first exit code in the range. Required. :vartype start: int @@ -9610,10 +9581,9 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class MultiInstanceSettings(_Model): - """Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, - if any of the subtasks fail (for example due to exiting with a non-zero exit - code) the entire multi-instance Task fails. The multi-instance Task is then - terminated and retried, up to its retry limit. + """Multi-instance Tasks are commonly used to support MPI Tasks. In the MPI case, if any of the + subtasks fail (for example due to exiting with a non-zero exit code) the entire multi-instance + Task fails. The multi-instance Task is then terminated and retried, up to its retry limit. :ivar number_of_instances: The number of Compute Nodes required by the Task. If omitted, the default is 1. @@ -10112,8 +10082,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class OutputFileUploadConfig(_Model): - """Options for an output file upload operation, including under what conditions - to perform the upload. + """Options for an output file upload operation, including under what conditions to perform the + upload. :ivar upload_condition: The conditions under which the Task output file or set of files should be uploaded. The default is taskcompletion. Required. Known values are: "tasksuccess", @@ -10266,8 +10236,8 @@ class ResizeError(_Model): :ivar message: A message describing the Pool resize error, intended to be suitable for display in a user interface. 
:vartype message: str - :ivar values_property: A list of additional error details related to the Pool resize error. - :vartype values_property: list[~azure.batch.models.NameValuePair] + :ivar error_values: A list of additional error details related to the Pool resize error. + :vartype error_values: list[~azure.batch.models.NameValuePair] """ code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -10276,8 +10246,8 @@ class ResizeError(_Model): message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A message describing the Pool resize error, intended to be suitable for display in a user interface.""" - values_property: Optional[list["_models.NameValuePair"]] = rest_field( - name="values", visibility=["read", "create", "update", "delete", "query"], original_tsp_name="values" + error_values: Optional[list["_models.NameValuePair"]] = rest_field( + name="values", visibility=["read", "create", "update", "delete", "query"] ) """A list of additional error details related to the Pool resize error.""" @@ -10287,7 +10257,7 @@ def __init__( *, code: Optional[str] = None, message: Optional[str] = None, - values_property: Optional[list["_models.NameValuePair"]] = None, + error_values: Optional[list["_models.NameValuePair"]] = None, ) -> None: ... @overload @@ -10600,8 +10570,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ServiceArtifactReference(_Model): - """Specifies the service artifact reference id used to set same image version - for all virtual machines in the scale set when using 'latest' image version. + """Specifies the service artifact reference id used to set same image version for all virtual + machines in the scale set when using 'latest' image version. :ivar id: The service artifact reference id of ServiceArtifactReference. 
The service artifact reference id in the form of @@ -10806,8 +10776,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class UserAccount(_Model): - """Properties used to create a user used to execute Tasks on an Azure Batch - Compute Node. + """Properties used to create a user used to execute Tasks on an Azure Batch Compute Node. :ivar name: The name of the user Account. Names can contain any Unicode characters up to a maximum length of 20. Required. @@ -10912,8 +10881,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class VirtualMachineConfiguration(_Model): - """The configuration for Compute Nodes in a Pool based on the Azure Virtual - Machines infrastructure. + """The configuration for Compute Nodes in a Pool based on the Azure Virtual Machines + infrastructure. :ivar image_reference: A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. Required. @@ -10944,10 +10913,10 @@ class VirtualMachineConfiguration(_Model): `_. :vartype data_disks: list[~azure.batch.models.DataDisk] :ivar license_type: This only applies to Images that contain the Windows operating system, and - should only be used when you hold valid on-premises licenses for the Compute Nodes which will - be deployed. If omitted, no on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. + should only be used when you hold valid on-premises licenses for the Compute + Nodes which will be deployed. If omitted, no on-premises licensing discount is + applied. Values are: Windows_Server (the on-premises license is for Windows + Server) and Windows_Client (the on-premises license is for Windows Client). :vartype license_type: str :ivar container_configuration: The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. 
All regular @@ -11012,11 +10981,11 @@ class VirtualMachineConfiguration(_Model): license_type: Optional[str] = rest_field( name="licenseType", visibility=["read", "create", "update", "delete", "query"] ) - """This only applies to Images that contain the Windows operating system, and should only be - used when you hold valid on-premises licenses for the Compute Nodes which will be deployed. If - omitted, no on-premises licensing discount is applied. Values are: Windows_Server - The - on-premises license is for Windows Server. Windows_Client - The on-premises license is for - Windows Client.""" + """This only applies to Images that contain the Windows operating system, and + should only be used when you hold valid on-premises licenses for the Compute + Nodes which will be deployed. If omitted, no on-premises licensing discount is + applied. Values are: Windows_Server (the on-premises license is for Windows + Server) and Windows_Client (the on-premises license is for Windows Client).""" container_configuration: Optional["_models.BatchContainerConfiguration"] = rest_field( name="containerConfiguration", visibility=["read", "create", "update", "delete", "query"] ) diff --git a/sdk/batch/azure-batch/azure/batch/models/_patch.py b/sdk/batch/azure-batch/azure/batch/models/_patch.py index cae277a3313c..2d7154cae4a5 100644 --- a/sdk/batch/azure-batch/azure/batch/models/_patch.py +++ b/sdk/batch/azure-batch/azure/batch/models/_patch.py @@ -21,10 +21,12 @@ class CreateTasksError(HttpResponseError): """Aggregate Exception containing details for any failures from a task add operation. - :param str message: Error message describing exit reason - :param [~TaskAddParameter] pending_tasks: List of tasks remaining to be submitted. - :param [~TaskAddResult] failure_tasks: List of tasks which failed to add - :param [~Exception] errors: List of unknown errors forcing early termination + :param pending_tasks: List of tasks remaining to be submitted. 
+ :type pending_tasks: Optional[List[~_models.BatchTaskCreateOptions]] + :param failure_tasks: List of tasks which failed to add + :type failure_tasks: Optional[List[~_models.BatchTaskCreateResult]] + :param errors: List of unknown errors forcing early termination + :type errors: Optional[List[~Exception]] """ def __init__(self, pending_tasks=None, failure_tasks=None, errors=None) -> None: diff --git a/sdk/batch/azure-batch/migration_guide.md b/sdk/batch/azure-batch/migration_guide.md index 9f17c775a9c6..8fd16f0819ee 100644 --- a/sdk/batch/azure-batch/migration_guide.md +++ b/sdk/batch/azure-batch/migration_guide.md @@ -308,7 +308,7 @@ for i in range(10): result = client.create_tasks( job_id="my-job", task_collection=tasks, - concurrencies=4 # Use 4 parallel threads for submission + max_concurrency=4 # Use 4 parallel threads for submission ) ``` diff --git a/sdk/batch/azure-batch/pyproject.toml b/sdk/batch/azure-batch/pyproject.toml index 28a36198653b..173ba8ba5601 100644 --- a/sdk/batch/azure-batch/pyproject.toml +++ b/sdk/batch/azure-batch/pyproject.toml @@ -17,7 +17,7 @@ authors = [ description = "Microsoft Corporation Azure Batch Client Library for Python" license = "MIT" classifiers = [ - "Development Status :: 4 - Beta", + "Development Status :: 5 - Production/Stable", "Programming Language :: Python", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3", diff --git a/sdk/batch/azure-batch/tests/test_batch.py b/sdk/batch/azure-batch/tests/test_batch.py index 02f3f7f920eb..9d9d9358cc24 100644 --- a/sdk/batch/azure-batch/tests/test_batch.py +++ b/sdk/batch/azure-batch/tests/test_batch.py @@ -173,7 +173,7 @@ async def test_batch_create_pools(self, client: BatchClient, **kwargs): "InvalidPropertyValue", client.create_pool, pool=test_network_pool, - timeout=45, + service_timeout=45, ) test_image_pool = models.BatchPoolCreateOptions( @@ -191,7 +191,9 @@ async def test_batch_create_pools(self, client: BatchClient, **kwargs): 
node_agent_sku_id="batch.node.ubuntu 22.04", ), ) - await self.assertBatchError("InvalidPropertyValue", client.create_pool, pool=test_image_pool, timeout=45) + await self.assertBatchError( + "InvalidPropertyValue", client.create_pool, pool=test_image_pool, service_timeout=45 + ) # Test Create Pool with Data Disk data_disk = models.DataDisk(logical_unit_number=1, disk_size_gb=50) @@ -947,7 +949,7 @@ async def test_batch_compute_node_user(self, client: BatchClient, **kwargs): assert response is None # Test Update User - user = models.BatchNodeUserUpdateOptions(password="liilef#$DdRGSa_ewkjh") + user = models.BatchNodeUserReplaceOptions(password="liilef#$DdRGSa_ewkjh") response = await wrap_result(client.replace_node_user(batch_pool.name, nodes[0].id, user_name, user)) assert response is None @@ -1202,9 +1204,9 @@ async def test_batch_tasks(self, client: BatchClient, **kwargs): ) result = await wrap_result(client.create_tasks(batch_job.id, task_collection=tasks)) assert isinstance(result, models.BatchCreateTaskCollectionResult) - assert result.values_property is not None - assert len(result.values_property) == 3 - assert result.values_property[0].status.lower() == models.BatchTaskAddStatus.SUCCESS + assert result.result_values is not None + assert len(result.result_values) == 3 + assert result.result_values[0].status.lower() == models.BatchTaskAddStatus.SUCCESS # Test List Tasks tasks = list(await wrap_list_result(client.list_tasks(batch_job.id))) @@ -1270,7 +1272,7 @@ async def test_batch_tasks(self, client: BatchClient, **kwargs): client.create_tasks, batch_job.id, tasks_to_add, - concurrencies=3, + max_concurrency=3, ) # Test Bulk Add Task Success @@ -1292,10 +1294,10 @@ async def test_batch_tasks(self, client: BatchClient, **kwargs): tasks_to_add.append(task) result = await wrap_result(client.create_tasks(batch_job.id, tasks_to_add)) assert isinstance(result, models.BatchCreateTaskCollectionResult) - assert result.values_property is not None - assert 
len(result.values_property) == 733 - assert result.values_property[0].status.lower() == models.BatchTaskAddStatus.SUCCESS - assert all(t.status.lower() == models.BatchTaskAddStatus.SUCCESS for t in result.values_property) + assert result.result_values is not None + assert len(result.result_values) == 733 + assert result.result_values[0].status.lower() == models.BatchTaskAddStatus.SUCCESS + assert all(t.status.lower() == models.BatchTaskAddStatus.SUCCESS for t in result.result_values) @CachedResourceGroupPreparer(location=AZURE_LOCATION) @AccountPreparer(location=AZURE_LOCATION, batch_environment=BATCH_ENVIRONMENT) @@ -1330,7 +1332,7 @@ async def test_batch_jobs(self, client: BatchClient, **kwargs): now = datetime.datetime.now(datetime.timezone.utc) - response = await wrap_result(client.create_job(job=job_param, ocpdate=now)) + response = await wrap_result(client.create_job(job=job_param, ocp_date=now)) assert response is None # Test Update Job diff --git a/sdk/batch/azure-batch/tsp-location.yaml b/sdk/batch/azure-batch/tsp-location.yaml index c5c2f791eedd..dbc0a0b5da66 100644 --- a/sdk/batch/azure-batch/tsp-location.yaml +++ b/sdk/batch/azure-batch/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/batch/data-plane/Batch -commit: 93edc09ba3d879875fe0fcca6db36a61ff27b2d6 +commit: 0225fe39a09e7b1e26cd4796e139eae44c883cee repo: Azure/azure-rest-api-specs additionalDirectories: