From cd6487b96f14b329906f440ee1cdf30549615b39 Mon Sep 17 00:00:00 2001 From: hivyas Date: Tue, 17 Nov 2020 14:58:05 -0800 Subject: [PATCH 01/64] added lva sdk package --- sdk/media/azure-media-lva-edge/CHANGELOG.md | 8 + sdk/media/azure-media-lva-edge/MANIFEST.in | 4 + sdk/media/azure-media-lva-edge/README.md | 38 + .../azure-media-lva-edge/azure/__init__.py | 7 + .../azure/media/lva/edge/__init__.py | 20 + .../media/lva/edge/_generated/__init__.py | 1 + .../media/lva/edge/_generated/_version.py | 9 + .../lva/edge/_generated/models/__init__.py | 199 ++ ...r_live_video_analyticson_io_tedge_enums.py | 108 + .../lva/edge/_generated/models/_models.py | 2008 +++++++++++++++ .../lva/edge/_generated/models/_models_py3.py | 2185 +++++++++++++++++ .../azure/media/lva/edge/_generated/py.typed | 1 + .../azure/media/lva/edge/_version.py | 7 + .../azure-media-lva-edge/dev_requirements.txt | 11 + .../samples/sample_conditional_async.py | 48 + .../samples/sample_hello_world.py | 35 + .../samples/sample_lva.py | 83 + .../azure-media-lva-edge/sdk_packaging.toml | 4 + sdk/media/azure-media-lva-edge/setup.cfg | 2 + sdk/media/azure-media-lva-edge/setup.py | 102 + .../azure-media-lva-edge/swagger/README.md | 26 + .../swagger/appconfiguration.json | 1239 ++++++++++ .../swagger/commandOutput.txt | 158 ++ .../tests/_shared/asynctestcase.py | 79 + .../tests/_shared/testcase.py | 0 .../azure-media-lva-edge/tests/conftest.py | 25 + .../tests/test_app_config.py | 1 + sdk/media/ci.yml | 3 + 28 files changed, 6411 insertions(+) create mode 100644 sdk/media/azure-media-lva-edge/CHANGELOG.md create mode 100644 sdk/media/azure-media-lva-edge/MANIFEST.in create mode 100644 sdk/media/azure-media-lva-edge/README.md create mode 100644 sdk/media/azure-media-lva-edge/azure/__init__.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py create mode 100644 
sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py create mode 100644 sdk/media/azure-media-lva-edge/dev_requirements.txt create mode 100644 sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py create mode 100644 sdk/media/azure-media-lva-edge/samples/sample_hello_world.py create mode 100644 sdk/media/azure-media-lva-edge/samples/sample_lva.py create mode 100644 sdk/media/azure-media-lva-edge/sdk_packaging.toml create mode 100644 sdk/media/azure-media-lva-edge/setup.cfg create mode 100644 sdk/media/azure-media-lva-edge/setup.py create mode 100644 sdk/media/azure-media-lva-edge/swagger/README.md create mode 100644 sdk/media/azure-media-lva-edge/swagger/appconfiguration.json create mode 100644 sdk/media/azure-media-lva-edge/swagger/commandOutput.txt create mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py create mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/testcase.py create mode 100644 sdk/media/azure-media-lva-edge/tests/conftest.py create mode 100644 sdk/media/azure-media-lva-edge/tests/test_app_config.py diff --git a/sdk/media/azure-media-lva-edge/CHANGELOG.md b/sdk/media/azure-media-lva-edge/CHANGELOG.md new file mode 100644 index 000000000000..816f21db092e --- /dev/null +++ b/sdk/media/azure-media-lva-edge/CHANGELOG.md @@ -0,0 +1,8 @@ + +# Release History + 
+------------------- + +## 0.0.1 (Unreleased) + +- Training day! diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-lva-edge/MANIFEST.in new file mode 100644 index 000000000000..7ebdd947f8ff --- /dev/null +++ b/sdk/media/azure-media-lva-edge/MANIFEST.in @@ -0,0 +1,4 @@ +recursive-include tests *.py +include *.md +include azure/__init__.py +recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md new file mode 100644 index 000000000000..c5012d4038c9 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/README.md @@ -0,0 +1,38 @@ +# Azure App Configuration client library for Python SDK Training + +Azure App Configuration is a managed service that helps developers centralize their application configurations simply and securely. + +Modern programs, especially programs running in a cloud, generally have many components that are distributed in nature. Spreading configuration settings across these components can lead to hard-to-troubleshoot errors during an application deployment. Use App Configuration to securely store all the settings for your application in one place. + +Use the client library for App Configuration to create and manage application configuration settings. + +## Prerequisites + +* Python 2.7, or 3.5 or later is required to use this package. +* You need an [Azure subscription][azure_sub], and a [Configuration Store][configuration_store] to use this package. + +To create a Configuration Store, you can use the Azure Portal or [Azure CLI][azure_cli]. + +After that, create the Configuration Store: + +```Powershell +az appconfig create --name --resource-group --location eastus +``` + + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. 
+For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments. diff --git a/sdk/media/azure-media-lva-edge/azure/__init__.py b/sdk/media/azure-media-lva-edge/azure/__init__.py new file mode 100644 index 000000000000..0e40e134bdac --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/__init__.py @@ -0,0 +1,7 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------- + +__path__ = __import__("pkgutil").extend_path(__path__, __name__) \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py new file mode 100644 index 000000000000..725cd6860541 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py @@ -0,0 +1,20 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore +from azure.media.lva.edge._generated.models import MediaGraphTopologySetRequestBody, MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody + +def _OverrideTopologySetRequestSerialize(self): + graph_body = MediaGraphTopologySetRequestBody(name=self.graph.name) + graph_body.system_data = self.graph.system_data + graph_body.properties = self.graph.properties + + return graph_body.serialize() + +MediaGraphTopologySetRequest.serialize = _OverrideTopologySetRequestSerialize + +def _OverrideInstanceSetRequestSerialize(self): + graph_body = MediaGraphInstanceSetRequestBody(name=self.instance.name) + graph_body.system_data = self.instance.system_data + graph_body.properties = self.instance.properties + + return graph_body.serialize() + +MediaGraphInstanceSetRequest.serialize = _OverrideInstanceSetRequestSerialize \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py new file mode 100644 index 000000000000..5960c353a898 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py 
b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py new file mode 100644 index 000000000000..31ed98425268 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0" diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py new file mode 100644 index 000000000000..2e389ab8ef9d --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py @@ -0,0 +1,199 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import ItemNonSetRequestBase + from ._models_py3 import MediaGraphAssetSink + from ._models_py3 import MediaGraphCertificateSource + from ._models_py3 import MediaGraphCognitiveServicesVisionExtension + from ._models_py3 import MediaGraphCredentials + from ._models_py3 import MediaGraphEndpoint + from ._models_py3 import MediaGraphExtensionProcessorBase + from ._models_py3 import MediaGraphFileSink + from ._models_py3 import MediaGraphFrameRateFilterProcessor + from ._models_py3 import MediaGraphGrpcExtension + from ._models_py3 import MediaGraphGrpcExtensionDataTransfer + from ._models_py3 import MediaGraphHttpExtension + from ._models_py3 import MediaGraphHttpHeaderCredentials + from ._models_py3 import MediaGraphImage + from ._models_py3 import MediaGraphImageFormat + from ._models_py3 import MediaGraphImageFormatEncoded + from ._models_py3 import MediaGraphImageFormatRaw + from ._models_py3 import MediaGraphImageScale + from ._models_py3 import MediaGraphInstance + from ._models_py3 import MediaGraphInstanceActivateRequest + from ._models_py3 import MediaGraphInstanceCollection + from ._models_py3 import MediaGraphInstanceDeActivateRequest + from ._models_py3 import MediaGraphInstanceDeleteRequest + from ._models_py3 import MediaGraphInstanceGetRequest + from ._models_py3 import MediaGraphInstanceListRequest + from ._models_py3 import MediaGraphInstanceProperties + from ._models_py3 import MediaGraphInstanceSetRequest + from ._models_py3 import MediaGraphInstanceSetRequestBody + from ._models_py3 import MediaGraphIoTHubMessageSink + from ._models_py3 import MediaGraphIoTHubMessageSource + from ._models_py3 import MediaGraphMotionDetectionProcessor + from ._models_py3 import MediaGraphNodeInput + from ._models_py3 import MediaGraphOutputSelector + from ._models_py3 import MediaGraphParameterDeclaration + from ._models_py3 import MediaGraphParameterDefinition 
+ from ._models_py3 import MediaGraphPemCertificateList + from ._models_py3 import MediaGraphProcessor + from ._models_py3 import MediaGraphRtspSource + from ._models_py3 import MediaGraphSignalGateProcessor + from ._models_py3 import MediaGraphSink + from ._models_py3 import MediaGraphSource + from ._models_py3 import MediaGraphSystemData + from ._models_py3 import MediaGraphTlsEndpoint + from ._models_py3 import MediaGraphTlsValidationOptions + from ._models_py3 import MediaGraphTopology + from ._models_py3 import MediaGraphTopologyCollection + from ._models_py3 import MediaGraphTopologyDeleteRequest + from ._models_py3 import MediaGraphTopologyGetRequest + from ._models_py3 import MediaGraphTopologyListRequest + from ._models_py3 import MediaGraphTopologyProperties + from ._models_py3 import MediaGraphTopologySetRequest + from ._models_py3 import MediaGraphTopologySetRequestBody + from ._models_py3 import MediaGraphUnsecuredEndpoint + from ._models_py3 import MediaGraphUsernamePasswordCredentials + from ._models_py3 import OperationBase +except (SyntaxError, ImportError): + from ._models import ItemNonSetRequestBase # type: ignore + from ._models import MediaGraphAssetSink # type: ignore + from ._models import MediaGraphCertificateSource # type: ignore + from ._models import MediaGraphCognitiveServicesVisionExtension # type: ignore + from ._models import MediaGraphCredentials # type: ignore + from ._models import MediaGraphEndpoint # type: ignore + from ._models import MediaGraphExtensionProcessorBase # type: ignore + from ._models import MediaGraphFileSink # type: ignore + from ._models import MediaGraphFrameRateFilterProcessor # type: ignore + from ._models import MediaGraphGrpcExtension # type: ignore + from ._models import MediaGraphGrpcExtensionDataTransfer # type: ignore + from ._models import MediaGraphHttpExtension # type: ignore + from ._models import MediaGraphHttpHeaderCredentials # type: ignore + from ._models import MediaGraphImage # type: ignore + 
from ._models import MediaGraphImageFormat # type: ignore + from ._models import MediaGraphImageFormatEncoded # type: ignore + from ._models import MediaGraphImageFormatRaw # type: ignore + from ._models import MediaGraphImageScale # type: ignore + from ._models import MediaGraphInstance # type: ignore + from ._models import MediaGraphInstanceActivateRequest # type: ignore + from ._models import MediaGraphInstanceCollection # type: ignore + from ._models import MediaGraphInstanceDeActivateRequest # type: ignore + from ._models import MediaGraphInstanceDeleteRequest # type: ignore + from ._models import MediaGraphInstanceGetRequest # type: ignore + from ._models import MediaGraphInstanceListRequest # type: ignore + from ._models import MediaGraphInstanceProperties # type: ignore + from ._models import MediaGraphInstanceSetRequest # type: ignore + from ._models import MediaGraphInstanceSetRequestBody # type: ignore + from ._models import MediaGraphIoTHubMessageSink # type: ignore + from ._models import MediaGraphIoTHubMessageSource # type: ignore + from ._models import MediaGraphMotionDetectionProcessor # type: ignore + from ._models import MediaGraphNodeInput # type: ignore + from ._models import MediaGraphOutputSelector # type: ignore + from ._models import MediaGraphParameterDeclaration # type: ignore + from ._models import MediaGraphParameterDefinition # type: ignore + from ._models import MediaGraphPemCertificateList # type: ignore + from ._models import MediaGraphProcessor # type: ignore + from ._models import MediaGraphRtspSource # type: ignore + from ._models import MediaGraphSignalGateProcessor # type: ignore + from ._models import MediaGraphSink # type: ignore + from ._models import MediaGraphSource # type: ignore + from ._models import MediaGraphSystemData # type: ignore + from ._models import MediaGraphTlsEndpoint # type: ignore + from ._models import MediaGraphTlsValidationOptions # type: ignore + from ._models import MediaGraphTopology # type: ignore + 
from ._models import MediaGraphTopologyCollection # type: ignore + from ._models import MediaGraphTopologyDeleteRequest # type: ignore + from ._models import MediaGraphTopologyGetRequest # type: ignore + from ._models import MediaGraphTopologyListRequest # type: ignore + from ._models import MediaGraphTopologyProperties # type: ignore + from ._models import MediaGraphTopologySetRequest # type: ignore + from ._models import MediaGraphTopologySetRequestBody # type: ignore + from ._models import MediaGraphUnsecuredEndpoint # type: ignore + from ._models import MediaGraphUsernamePasswordCredentials # type: ignore + from ._models import OperationBase # type: ignore + +from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( + MediaGraphGrpcExtensionDataTransferMode, + MediaGraphImageEncodingFormat, + MediaGraphImageFormatRawPixelFormat, + MediaGraphImageScaleMode, + MediaGraphInstanceState, + MediaGraphMotionDetectionSensitivity, + MediaGraphOutputSelectorOperator, + MediaGraphParameterType, + MediaGraphRtspTransport, +) + +__all__ = [ + 'ItemNonSetRequestBase', + 'MediaGraphAssetSink', + 'MediaGraphCertificateSource', + 'MediaGraphCognitiveServicesVisionExtension', + 'MediaGraphCredentials', + 'MediaGraphEndpoint', + 'MediaGraphExtensionProcessorBase', + 'MediaGraphFileSink', + 'MediaGraphFrameRateFilterProcessor', + 'MediaGraphGrpcExtension', + 'MediaGraphGrpcExtensionDataTransfer', + 'MediaGraphHttpExtension', + 'MediaGraphHttpHeaderCredentials', + 'MediaGraphImage', + 'MediaGraphImageFormat', + 'MediaGraphImageFormatEncoded', + 'MediaGraphImageFormatRaw', + 'MediaGraphImageScale', + 'MediaGraphInstance', + 'MediaGraphInstanceActivateRequest', + 'MediaGraphInstanceCollection', + 'MediaGraphInstanceDeActivateRequest', + 'MediaGraphInstanceDeleteRequest', + 'MediaGraphInstanceGetRequest', + 'MediaGraphInstanceListRequest', + 'MediaGraphInstanceProperties', + 'MediaGraphInstanceSetRequest', + 'MediaGraphInstanceSetRequestBody', + 
'MediaGraphIoTHubMessageSink', + 'MediaGraphIoTHubMessageSource', + 'MediaGraphMotionDetectionProcessor', + 'MediaGraphNodeInput', + 'MediaGraphOutputSelector', + 'MediaGraphParameterDeclaration', + 'MediaGraphParameterDefinition', + 'MediaGraphPemCertificateList', + 'MediaGraphProcessor', + 'MediaGraphRtspSource', + 'MediaGraphSignalGateProcessor', + 'MediaGraphSink', + 'MediaGraphSource', + 'MediaGraphSystemData', + 'MediaGraphTlsEndpoint', + 'MediaGraphTlsValidationOptions', + 'MediaGraphTopology', + 'MediaGraphTopologyCollection', + 'MediaGraphTopologyDeleteRequest', + 'MediaGraphTopologyGetRequest', + 'MediaGraphTopologyListRequest', + 'MediaGraphTopologyProperties', + 'MediaGraphTopologySetRequest', + 'MediaGraphTopologySetRequestBody', + 'MediaGraphUnsecuredEndpoint', + 'MediaGraphUsernamePasswordCredentials', + 'OperationBase', + 'MediaGraphGrpcExtensionDataTransferMode', + 'MediaGraphImageEncodingFormat', + 'MediaGraphImageFormatRawPixelFormat', + 'MediaGraphImageScaleMode', + 'MediaGraphInstanceState', + 'MediaGraphMotionDetectionSensitivity', + 'MediaGraphOutputSelectorOperator', + 'MediaGraphParameterType', + 'MediaGraphRtspTransport', +] diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py new file mode 100644 index 000000000000..6e78e4728244 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. + """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """How frame data should be transmitted to the inferencing engine. + """ + + EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. + SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. + +class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The different encoding formats that can be used for the image. + """ + + JPEG = "Jpeg" #: JPEG image format. + BMP = "Bmp" #: BMP image format. + PNG = "Png" #: PNG image format. + +class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """pixel format + """ + + YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). + RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. + RGB565_LE = "Rgb565le" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian. + RGB555_BE = "Rgb555be" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined. + RGB555_LE = "Rgb555le" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined. 
+ RGB24 = "Rgb24" #: Packed RGB 8:8:8, 24bpp, RGBRGB. + BGR24 = "Bgr24" #: Packed RGB 8:8:8, 24bpp, BGRBGR. + ARGB = "Argb" #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB. + RGBA = "Rgba" #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA. + ABGR = "Abgr" #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR. + BGRA = "Bgra" #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA. + +class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the modes for scaling an input video frame into an image, before it is sent to an + inference engine. + """ + + PRESERVE_ASPECT_RATIO = "PreserveAspectRatio" #: Use the same aspect ratio as the input frame. + PAD = "Pad" #: Center pad the input frame to match the given dimensions. + STRETCH = "Stretch" #: Stretch input frame to match given dimensions. + +class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Allowed states for a graph Instance. + """ + + INACTIVE = "Inactive" #: Inactive state. + ACTIVATING = "Activating" #: Activating state. + ACTIVE = "Active" #: Active state. + DEACTIVATING = "Deactivating" #: Deactivating state. + +class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumeration that specifies the sensitivity of the motion detection processor. + """ + + LOW = "Low" #: Low Sensitivity. + MEDIUM = "Medium" #: Medium Sensitivity. + HIGH = "High" #: High Sensitivity. + +class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The operator to compare streams by. + """ + + IS_ENUM = "is" #: A media type is the same type or a subtype. + IS_NOT = "isNot" #: A media type is not the same type or a subtype. + +class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """name + """ + + STRING = "String" #: A string parameter value. + SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. 
+ INT = "Int" #: A 32-bit signed integer as parameter value. + DOUBLE = "Double" #: A 64-bit double-precision floating point type as parameter value. + BOOL = "Bool" #: A boolean value that is either true or false. + +class MediaGraphRtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + """ + + HTTP = "Http" #: HTTP/HTTPS transport. This should be used when HTTP tunneling is desired. + TCP = "Tcp" #: TCP transport. This should be used when HTTP tunneling is NOT desired. diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py new file mode 100644 index 000000000000..62f58c7ea385 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py @@ -0,0 +1,2008 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import msrest.serialization + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. 
+ :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = kwargs['name'] + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. + :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. + :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = kwargs.get('asset_name_pattern', None) + self.segment_length = kwargs.get('segment_length', None) + self.local_media_cache_path = kwargs.get('local_media_cache_path', None) + self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = kwargs.get('endpoint', None) + self.image = kwargs.get('image', None) + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = kwargs.get('credentials', None) + self.url = kwargs['url'] + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. 
+ :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = kwargs['file_path_pattern'] + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. 
+ :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = kwargs.get('maximum_fps', None) + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. 
+    :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer
+    """
+
+    _validation = {
+        'type': {'required': True},
+        'name': {'required': True},
+        'inputs': {'required': True},
+        'data_transfer': {'required': True},
+    }
+
+    _attribute_map = {
+        'type': {'key': '@type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'},
+        'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'},
+        'image': {'key': 'image', 'type': 'MediaGraphImage'},
+        'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphGrpcExtension, self).__init__(**kwargs)
+        self.type = '#Microsoft.Media.MediaGraphGrpcExtension'  # type: str
+        self.data_transfer = kwargs['data_transfer']
+
+
+class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model):
+    """Describes how media should be transferred to the inferencing engine.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if
+     mode is SharedMemory. Should not be specified otherwise.
+    :type shared_memory_size_mi_b: str
+    :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible
+     values include: "Embedded", "SharedMemory".
+ :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) + self.mode = kwargs['mode'] + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. + :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = kwargs['header_name'] + self.header_value = kwargs['header_value'] + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. 
+ :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = kwargs.get('scale', None) + self.format = kwargs.get('format', None) + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). 
+ :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = kwargs.get('encoding', "Jpeg") + self.quality = kwargs.get('quality', None) + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = kwargs.get('pixel_format', None) + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. + + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. 
+ :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = kwargs.get('mode', None) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDeactivate' # type: str + + +class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeleteRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDelete' # type: str + + +class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): + """MediaGraphInstanceGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. 
method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. Possible values include: "Inactive", + "Activating", "Active", "Deactivating". 
+ :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + self.topology_name = kwargs.get('topology_name', None) + self.parameters = kwargs.get('parameters', None) + self.state = kwargs.get('state', None) + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. + :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'instance': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceSet' # type: str + self.instance = kwargs['instance'] + + +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): + """MediaGraphInstanceSetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. 
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name.Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+    :vartype api_version: str
+    :param name: Required. name.
+    :type name: str
+    :param system_data: Graph system data.
+    :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData
+    :param properties: Properties of a Media Graph instance.
+    :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'},
+        'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphInstanceSetRequestBody, self).__init__(**kwargs)
+        self.method_name = 'MediaGraphInstanceSetRequestBody'  # type: str
+        # NOTE(review): autorest emitted a duplicate method_name assignment here;
+        # replaced with this comment (line kept to preserve the patch hunk counts).
+        self.name = kwargs['name']
+        self.system_data = kwargs.get('system_data', None)
+        self.properties = kwargs.get('properties', None)
+
+
+class MediaGraphIoTHubMessageSink(MediaGraphSink):
+    """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param type: Required. The discriminator for derived types.Constant filled by server.
+    :type type: str
+    :param name: Required. Name to be used for the media graph sink.
+    :type name: str
+    :param inputs: Required.
An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param hub_output_name: Name of the output path to which the graph will publish message. These + messages can then be delivered to desired destinations by declaring routes referencing the + output path in the IoT Edge deployment manifest. + :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = kwargs.get('hub_output_name', None) + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. 
+ :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = kwargs.get('hub_input_name', None) + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". + :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. + :type output_motion_region: bool + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, + 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphMotionDetectionProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str + self.sensitivity = kwargs.get('sensitivity', None) + self.output_motion_region = kwargs.get('output_motion_region', None) + + +class MediaGraphNodeInput(msrest.serialization.Model): + """Represents the input to any node in a media graph. + + :param node_name: The name of another node in the media graph, the output of which is used as + input to this node. 
+ :type node_name: str + :param output_selectors: Allows for the selection of particular streams from another node. + :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + """ + + _attribute_map = { + 'node_name': {'key': 'nodeName', 'type': 'str'}, + 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphNodeInput, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.output_selectors = kwargs.get('output_selectors', None) + + +class MediaGraphOutputSelector(msrest.serialization.Model): + """Allows for the selection of particular streams from another node. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar property: The stream property to compare with. Default value: "mediaType". + :vartype property: str + :param operator: The operator to compare streams by. Possible values include: "is", "isNot". + :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :param value: Value to compare against. + :type value: str + """ + + _validation = { + 'property': {'constant': True}, + } + + _attribute_map = { + 'property': {'key': 'property', 'type': 'str'}, + 'operator': {'key': 'operator', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + property = "mediaType" + + def __init__( + self, + **kwargs + ): + super(MediaGraphOutputSelector, self).__init__(**kwargs) + self.operator = kwargs.get('operator', None) + self.value = kwargs.get('value', None) + + +class MediaGraphParameterDeclaration(msrest.serialization.Model): + """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the parameter. + :type name: str + :param type: Required. name. Possible values include: "String", "SecretString", "Int", + "Double", "Bool". + :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :param description: Description of the parameter. + :type description: str + :param default: The default value for the parameter, to be used if the graph instance does not + specify a value. + :type default: str + """ + + _validation = { + 'name': {'required': True, 'max_length': 64, 'min_length': 0}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'default': {'key': 'default', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphParameterDeclaration, self).__init__(**kwargs) + self.name = kwargs['name'] + self.type = kwargs['type'] + self.description = kwargs.get('description', None) + self.default = kwargs.get('default', None) + + +class MediaGraphParameterDefinition(msrest.serialization.Model): + """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Name of parameter as defined in the graph topology. + :type name: str + :param value: Required. Value of parameter. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = kwargs['name'] + self.value = kwargs['value'] + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. + :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = kwargs['certificates'] + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = kwargs.get('transport', None) + self.endpoint = kwargs['endpoint'] + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. 
+ :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. + :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) + self.activation_signal_offset = kwargs.get('activation_signal_offset', None) + self.minimum_activation_time = kwargs.get('minimum_activation_time', None) + self.maximum_activation_time = kwargs.get('maximum_activation_time', None) + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). 
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = kwargs.get('created_at', None) + self.last_modified_at = kwargs.get('last_modified_at', None) + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = kwargs.get('trusted_certificates', None) + self.validation_options = kwargs.get('validation_options', None) + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. + :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = kwargs.get('ignore_hostname', None) + self.ignore_signature = kwargs.get('ignore_signature', None) + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. 
+ :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyDeleteRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyDelete' # type: str + + +class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): + """MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyGetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyGet' # type: str + + +class MediaGraphTopologyListRequest(OperationBase): + """MediaGraphTopologyListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+    :vartype api_version: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphTopologyListRequest, self).__init__(**kwargs)
+        self.method_name = 'GraphTopologyList'  # type: str
+
+
+class MediaGraphTopologyProperties(msrest.serialization.Model):
+    """Describes the properties of a graph topology.
+
+    :param description: An optional description of the graph topology.
+    :type description: str
+    :param parameters: The parameters declared by this topology; graph instances of this
+     topology may supply values for them.
+    :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration]
+    :param sources: The source nodes of this topology.
+    :type sources: list[~azure.media.lva.edge.models.MediaGraphSource]
+    :param processors: The processor nodes of this topology.
+    :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor]
+    :param sinks: The sink nodes of this topology.
+    :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink]
+    """
+
+    _attribute_map = {
+        'description': {'key': 'description', 'type': 'str'},
+        'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'},
+        'sources': {'key': 'sources', 'type': '[MediaGraphSource]'},
+        'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'},
+        'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphTopologyProperties, self).__init__(**kwargs)
+        self.description = kwargs.get('description', None)
+        self.parameters = kwargs.get('parameters', None)
+        self.sources = kwargs.get('sources', None)
+        self.processors = kwargs.get('processors', None)
+        self.sinks = kwargs.get('sinks', None)
+
+
+class MediaGraphTopologySetRequest(OperationBase):
+    """MediaGraphTopologySetRequest.
+ + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param graph: Required. Describes a graph topology. + :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'graph': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologySetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologySet' # type: str + self.graph = kwargs['graph'] + + +class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): + """MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+    :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'},
+        'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphTopologySetRequestBody, self).__init__(**kwargs)
+        # NOTE(review): the generator emitted this assignment twice; the duplicate
+        # was redundant (same constant) and has been removed.
+        self.method_name = 'MediaGraphTopologySetRequestBody'  # type: str
+        self.name = kwargs['name']
+        self.system_data = kwargs.get('system_data', None)
+        self.properties = kwargs.get('properties', None)
+
+
+class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint):
+    """An endpoint that the media graph can connect to, with no encryption in transit.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param type: Required. The discriminator for derived types.Constant filled by server.
+    :type type: str
+    :param credentials: Polymorphic credentials to be presented to the endpoint.
+    :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials
+    :param url: Required. Url for the endpoint.
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. + :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = kwargs['username'] + self.password = kwargs.get('password', None) diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py new file mode 100644 index 000000000000..5de3adde8e11 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py @@ -0,0 +1,2185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import List, Optional, Union + +import msrest.serialization + +from ._definitionsfor_live_video_analyticson_io_tedge_enums import * + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = name + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. + :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. + :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + asset_name_pattern: Optional[str] = None, + segment_length: Optional[datetime.timedelta] = None, + local_media_cache_path: Optional[str] = None, + local_media_cache_maximum_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = asset_name_pattern + self.segment_length = segment_length + self.local_media_cache_path = local_media_cache_path + self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = endpoint + self.image = image + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. 
+ :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. 
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = credentials + self.url = url + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. 
+ :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + file_path_pattern: str, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = file_path_pattern + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. 
+ :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + maximum_fps: Optional[str] = None, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = maximum_fps + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. 
+ :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'data_transfer': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + data_transfer: "MediaGraphGrpcExtensionDataTransfer", + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str + self.data_transfer = data_transfer + + +class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): + """Describes how media should be transferred to the inferencing engine. + + All required parameters must be populated in order to send to Azure. + + :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if + mode is SharedMemory. Should not be specificed otherwise. + :type shared_memory_size_mi_b: str + :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + values include: "Embedded", "SharedMemory". 
+ :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Union[str, "MediaGraphGrpcExtensionDataTransferMode"], + shared_memory_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = shared_memory_size_mi_b + self.mode = mode + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. 
+ :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + *, + header_name: str, + header_value: str, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = header_name + self.header_value = header_value + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. + :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + *, + scale: Optional["MediaGraphImageScale"] = None, + format: Optional["MediaGraphImageFormat"] = None, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = scale + self.format = format + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). + :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + *, + encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", + quality: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = encoding + self.quality = quality + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + *, + pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = pixel_format + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. + + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. + :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, + width: Optional[str] = None, + height: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = mode + self.width = width + self.height = height + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. 
+ :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphInstanceProperties"] = None, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphInstance"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceDeActivateRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceDeactivate' # type: str + + +class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceDeleteRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceDelete' # type: str + + +class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): + """MediaGraphInstanceGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. 
Possible values include: "Inactive", + "Activating", "Active", "Deactivating". + :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + topology_name: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDefinition"]] = None, + state: Optional[Union[str, "MediaGraphInstanceState"]] = None, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = description + self.topology_name = topology_name + self.parameters = parameters + self.state = state + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. 
+ :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'instance': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + instance: "MediaGraphInstance", + **kwargs + ): + super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceSet' # type: str + self.instance = instance + + +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): + """MediaGraphInstanceSetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties
+ """
+
+ _validation = {
+ 'method_name': {'readonly': True},
+ 'api_version': {'constant': True},
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'method_name': {'key': 'methodName', 'type': 'str'},
+ 'api_version': {'key': '@apiVersion', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'},
+ 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'},
+ }
+
+ api_version = "1.0"
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ system_data: Optional["MediaGraphSystemData"] = None,
+ properties: Optional["MediaGraphInstanceProperties"] = None,
+ **kwargs
+ ):
+ super(MediaGraphInstanceSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs)
+ self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str
+ # NOTE(review): autorest emitted this method_name assignment twice; the duplicate was removed (no behavior change).
+ self.name = name
+ self.system_data = system_data
+ self.properties = properties
+
+
+ class MediaGraphIoTHubMessageSink(MediaGraphSink):
+ """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param type: Required. The discriminator for derived types.Constant filled by server.
+ :type type: str
+ :param name: Required. Name to be used for the media graph sink.
+ :type name: str
+ :param inputs: Required. An array of the names of the other nodes in the media graph, the
+ outputs of which are used as input for this sink node.
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput]
+ :param hub_output_name: Name of the output path to which the graph will publish message. These
+ messages can then be delivered to desired destinations by declaring routes referencing the
+ output path in the IoT Edge deployment manifest.
+ :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + hub_output_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = hub_output_name + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + hub_input_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = hub_input_name + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". 
+ :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. + :type output_motion_region: bool + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, + 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, + output_motion_region: Optional[bool] = None, + **kwargs + ): + super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str + self.sensitivity = sensitivity + self.output_motion_region = output_motion_region + + +class MediaGraphNodeInput(msrest.serialization.Model): + """Represents the input to any node in a media graph. + + :param node_name: The name of another node in the media graph, the output of which is used as + input to this node. + :type node_name: str + :param output_selectors: Allows for the selection of particular streams from another node. 
+ :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + """ + + _attribute_map = { + 'node_name': {'key': 'nodeName', 'type': 'str'}, + 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, + } + + def __init__( + self, + *, + node_name: Optional[str] = None, + output_selectors: Optional[List["MediaGraphOutputSelector"]] = None, + **kwargs + ): + super(MediaGraphNodeInput, self).__init__(**kwargs) + self.node_name = node_name + self.output_selectors = output_selectors + + +class MediaGraphOutputSelector(msrest.serialization.Model): + """Allows for the selection of particular streams from another node. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar property: The stream property to compare with. Default value: "mediaType". + :vartype property: str + :param operator: The operator to compare streams by. Possible values include: "is", "isNot". + :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :param value: Value to compare against. + :type value: str + """ + + _validation = { + 'property': {'constant': True}, + } + + _attribute_map = { + 'property': {'key': 'property', 'type': 'str'}, + 'operator': {'key': 'operator', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + property = "mediaType" + + def __init__( + self, + *, + operator: Optional[Union[str, "MediaGraphOutputSelectorOperator"]] = None, + value: Optional[str] = None, + **kwargs + ): + super(MediaGraphOutputSelector, self).__init__(**kwargs) + self.operator = operator + self.value = value + + +class MediaGraphParameterDeclaration(msrest.serialization.Model): + """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the parameter. + :type name: str + :param type: Required. name. Possible values include: "String", "SecretString", "Int", + "Double", "Bool". + :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :param description: Description of the parameter. + :type description: str + :param default: The default value for the parameter, to be used if the graph instance does not + specify a value. + :type default: str + """ + + _validation = { + 'name': {'required': True, 'max_length': 64, 'min_length': 0}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'default': {'key': 'default', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "MediaGraphParameterType"], + description: Optional[str] = None, + default: Optional[str] = None, + **kwargs + ): + super(MediaGraphParameterDeclaration, self).__init__(**kwargs) + self.name = name + self.type = type + self.description = description + self.default = default + + +class MediaGraphParameterDefinition(msrest.serialization.Model): + """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Name of parameter as defined in the graph topology. + :type name: str + :param value: Required. Value of parameter. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + value: str, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = name + self.value = value + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. + :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + *, + certificates: List[str], + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = certificates + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + *, + name: str, + endpoint: "MediaGraphEndpoint", + transport: Optional[Union[str, "MediaGraphRtspTransport"]] = None, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = transport + self.endpoint = endpoint + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. 
+ :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. + :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + activation_evaluation_window: Optional[str] = None, + activation_signal_offset: Optional[str] = None, + minimum_activation_time: Optional[str] = None, + maximum_activation_time: Optional[str] = None, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = activation_evaluation_window + self.activation_signal_offset = activation_signal_offset + self.minimum_activation_time = minimum_activation_time + self.maximum_activation_time = maximum_activation_time + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). 
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + *, + created_at: Optional[datetime.datetime] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = created_at + self.last_modified_at = last_modified_at + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + trusted_certificates: Optional["MediaGraphCertificateSource"] = None, + validation_options: Optional["MediaGraphTlsValidationOptions"] = None, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = trusted_certificates + self.validation_options = validation_options + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. 
+ :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + *, + ignore_hostname: Optional[str] = None, + ignore_signature: Optional[str] = None, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = ignore_hostname + self.ignore_signature = ignore_signature + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). 
+ :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphTopology"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphTopologyDeleteRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphTopologyDelete' # type: str + + +class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): + """MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str
+ """
+
+ _validation = {
+ 'method_name': {'readonly': True},
+ 'api_version': {'constant': True},
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'method_name': {'key': 'methodName', 'type': 'str'},
+ 'api_version': {'key': '@apiVersion', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ }
+
+ api_version = "1.0"
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ **kwargs
+ ):
+ super(MediaGraphTopologyGetRequest, self).__init__(name=name, **kwargs)
+ self.method_name = 'GraphTopologyGet' # type: str
+
+
+ class MediaGraphTopologyListRequest(OperationBase):
+ """MediaGraphTopologyListRequest.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar method_name: method name. Constant filled by server.
+ :vartype method_name: str
+ :ivar api_version: api version. Default value: "1.0".
+ :vartype api_version: str
+ """
+
+ _validation = {
+ 'method_name': {'readonly': True},
+ 'api_version': {'constant': True},
+ }
+
+ _attribute_map = {
+ 'method_name': {'key': 'methodName', 'type': 'str'},
+ 'api_version': {'key': '@apiVersion', 'type': 'str'},
+ }
+
+ api_version = "1.0"
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(MediaGraphTopologyListRequest, self).__init__(**kwargs)
+ self.method_name = 'GraphTopologyList' # type: str
+
+
+ class MediaGraphTopologyProperties(msrest.serialization.Model):
+ """Describes the properties of a graph topology.
+
+ :param description: An optional description of the graph topology.
+ :type description: str
+ :param parameters: The list of parameters declared by the graph topology.
+ :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration]
+ :param sources: The list of source nodes in the graph topology.
+ :type sources: list[~azure.media.lva.edge.models.MediaGraphSource]
+ :param processors: The list of processor nodes in the graph topology.
+ :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor]
+ :param sinks: The list of sink nodes in the graph topology.
+ :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, + 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, + 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, + 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDeclaration"]] = None, + sources: Optional[List["MediaGraphSource"]] = None, + processors: Optional[List["MediaGraphProcessor"]] = None, + sinks: Optional[List["MediaGraphSink"]] = None, + **kwargs + ): + super(MediaGraphTopologyProperties, self).__init__(**kwargs) + self.description = description + self.parameters = parameters + self.sources = sources + self.processors = processors + self.sinks = sinks + + +class MediaGraphTopologySetRequest(OperationBase): + """MediaGraphTopologySetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param graph: Required. Describes a graph topology. 
+ :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'graph': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + graph: "MediaGraphTopology", + **kwargs + ): + super(MediaGraphTopologySetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologySet' # type: str + self.graph = graph + + +class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): + """MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): + """An endpoint that the media graph can connect to, with no encryption in transit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint.
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. + :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + *, + username: str, + password: Optional[str] = None, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = username + self.password = password diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. 
\ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py new file mode 100644 index 000000000000..f95f18986f48 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py @@ -0,0 +1,7 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- + +VERSION = '0.0.1' diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt new file mode 100644 index 000000000000..08bcfb306787 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -0,0 +1,11 @@ +../../core/azure-core +-e ../../../tools/azure-devtools +-e ../../../tools/azure-sdk-tools +-e ../../identity/azure-identity +aiohttp>=3.0; python_version >= '3.5' +aiodns>=2.0; python_version >= '3.5' +msrest>=0.6.10 +pytest==5.4.2 +tox>=3.20.0 +tox-monorepo>=0.1.2 +pytest-asyncio==0.12.0 diff --git a/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py b/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py new file mode 100644 index 000000000000..c894b9b71a09 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py @@ -0,0 +1,48 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import asyncio +import os +from colorama import init, Style, Fore +init() + +from azure.identity.aio import DefaultAzureCredential +from azure.learnappconfig.aio import AppConfigurationClient +from azure.core.exceptions import ResourceNotFoundError, ResourceNotModifiedError +from azure.core import MatchConditions + + +async def main(): + url = os.environ.get('API-LEARN_ENDPOINT') + credential = DefaultAzureCredential() + async with AppConfigurationClient(account_url=url, credential=credential) as client: + + # Retrieve initial color value + try: + first_color = await client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) + except ResourceNotFoundError: + raise + + # Get latest color value, only if it has changed + try: + new_color = await client.get_configuration_setting( + key=os.environ['API-LEARN_SETTING_COLOR_KEY'], + match_condition=MatchConditions.IfModified, + etag=first_color.etag + ) + except ResourceNotModifiedError: + new_color = first_color + + color = getattr(Fore, new_color.value.upper()) + greeting = 'Hello!' + print(f'{color}{greeting}{Style.RESET_ALL}') + + +if __name__ == "__main__": + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) diff --git a/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py b/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py new file mode 100644 index 000000000000..f6fa6e0686fd --- /dev/null +++ b/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import os +from colorama import init, Style, Fore +init() + +from azure.identity import DefaultAzureCredential +from azure.learnappconfig import AppConfigurationClient + +def main(): + url = os.environ.get('API-LEARN_ENDPOINT') + credential = DefaultAzureCredential() + client = AppConfigurationClient(account_url=url, credential=credential) + + try: + color_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) + color = color_setting.value.upper() + text_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_TEXT_KEY']) + greeting = text_setting.value + except: + color = 'RED' + greeting = 'Default greeting' + + color = getattr(Fore, color) + print(f'{color}{greeting}{Style.RESET_ALL}') + + +if __name__ == "__main__": + main() diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-lva-edge/samples/sample_lva.py new file mode 100644 index 000000000000..9ac9ca9a817a --- /dev/null +++ b/sdk/media/azure-media-lva-edge/samples/sample_lva.py @@ -0,0 +1,83 @@ + +import json +import os +from azure.media.lva.edge._generated.models import * +from azure.iot.hub import IoTHubRegistryManager +from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult +from datetime import time + +device_id = "lva-sample-device" +module_d = "lvaEdge" +connection_string = "HostName=<your-iot-hub-name>.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=<your-shared-access-key>" +graph_instance_name = "graphInstance1" +graph_topology_name = "graphTopology1" + + +def build_graph_topology(): + graph_properties = MediaGraphTopologyProperties() + graph_properties.description = "Continuous video recording to an Azure Media Services Asset" + user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") + password_param =
MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") + url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") + + source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) + node = MediaGraphNodeInput(node_name="rtspSource") + sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/") + graph_properties.parameters = [user_name_param, password_param, url_param] + graph_properties.sources = [source] + graph_properties.sinks = [sink] + graph = MediaGraphTopology(name=graph_topology_name,properties=graph_properties) + + return graph + +def build_graph_instance(): + url_param = MediaGraphParameterDefinition(name="rtspUrl", value="rtsp://rtspsim:554/media/camera-300s.mkv") + graph_instance_properties = MediaGraphInstanceProperties(description="Sample graph description", topology_name=graph_topology_name, parameters=[url_param]) + + graph_instance = MediaGraphInstance(name=graph_instance_name, properties=graph_instance_properties) + + return graph_instance + +def invoke_method(method): + direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize()) + registry_manager = IoTHubRegistryManager(connection_string) + + return registry_manager.invoke_device_module_method(device_id, module_d, direct_method) + +def main(): + graph_topology = build_graph_topology() + graph_instance = build_graph_instance() + + try: + set_graph = invoke_method(MediaGraphTopologySetRequest(graph=graph_topology)) + set_graph_result = MediaGraphTopology.deserialize(set_graph) + + list_graph = 
invoke_method(MediaGraphTopologyListRequest()) + list_graph_result = MediaGraphTopology.deserialize(list_graph) + + get_graph = invoke_method(MediaGraphTopologyGetRequest(name=graph_topology_name)) + get_graph_result = MediaGraphTopology.deserialize(get_graph) + + set_graph_instance = invoke_method(MediaGraphInstanceSetRequest(instance=graph_instance)) + set_graph_instance_result = MediaGraphInstance.deserialize(set_graph_instance) + + activate_graph_instance = invoke_method(MediaGraphInstanceActivateRequest(name=graph_instance_name)) + activate_graph_instance_result = MediaGraphInstance.deserialize(activate_graph_instance) + + get_graph_instance = invoke_method(MediaGraphInstanceGetRequest(name=graph_instance_name)) + get_graph_instance_result = MediaGraphInstance.deserialize(get_graph_instance) + + deactivate_graph_instance = invoke_method(MediaGraphInstanceDeActivateRequest(name=graph_instance_name)) + deactivate_graph_instance_result = MediaGraphInstance.deserialize(deactivate_graph_instance) + + delete_graph_instance = invoke_method(MediaGraphInstanceDeleteRequest(name=graph_instance_name)) + delete_graph_instance_result = MediaGraphInstance.deserialize(delete_graph_instance) + + delete_graph = invoke_method(MediaGraphTopologyDeleteRequest(name=graph_topology_name)) + delete_graph_result = MediaGraphTopology.deserialize(delete_graph) + + except Exception as ex: + print(ex) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/sdk_packaging.toml b/sdk/media/azure-media-lva-edge/sdk_packaging.toml new file mode 100644 index 000000000000..b366f78fb41b --- /dev/null +++ b/sdk/media/azure-media-lva-edge/sdk_packaging.toml @@ -0,0 +1,4 @@ +[packaging] +is_arm = false +need_msrestazure = false +auto_update = false diff --git a/sdk/media/azure-media-lva-edge/setup.cfg b/sdk/media/azure-media-lva-edge/setup.cfg new file mode 100644 index 000000000000..3c6e79cf31da --- /dev/null +++ 
b/sdk/media/azure-media-lva-edge/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-lva-edge/setup.py new file mode 100644 index 000000000000..d4a8c12edcc6 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/setup.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import sys +import re +import os.path +from io import open +from setuptools import find_packages, setup + +# Change the PACKAGE_NAME only to change folder and different name +PACKAGE_NAME = "azure-media-lva-edge" +PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" + +# a-b-c => a/b/c +package_folder_path = PACKAGE_NAME.replace('-', '/') +# a-b-c => a.b.c +namespace_name = PACKAGE_NAME.replace('-', '.') + +# azure v0.x is not compatible with this package +# azure v0.x used to have a __version__ attribute (newer versions don't) +try: + import azure + try: + ver = azure.__version__ + raise Exception( + 'This package is incompatible with azure=={}. '.format(ver) + + 'Uninstall it with "pip uninstall azure".' 
+ ) + except AttributeError: + pass +except ImportError: + pass + +# Version extraction inspired from 'requests' +with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd: + version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', + fd.read(), re.MULTILINE).group(1) + +if not version: + raise RuntimeError('Cannot find version information') + +with open('README.md', encoding='utf-8') as f: + readme = f.read() +with open('CHANGELOG.md', encoding='utf-8') as f: + changelog = f.read() + +exclude_packages = [ + 'tests', + 'tests.*', + 'samples', + # Exclude packages that will be covered by PEP420 or nspkg + 'azure', + ] +if sys.version_info < (3, 5, 3): + exclude_packages.extend([ + '*.aio', + '*.aio.*' + ]) + +setup( + name=PACKAGE_NAME, + version=version, + description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME), + long_description=readme + '\n\n' + changelog, + long_description_content_type='text/markdown', + license='MIT License', + author='Microsoft Corporation', + author_email='azpysdkhelp@microsoft.com', + url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-media-lva-edge', + classifiers=[ + "Development Status :: 5 - Production/Stable", + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'License :: OSI Approved :: MIT License', + ], + zip_safe=False, + packages=find_packages(exclude=exclude_packages), + install_requires=[ + "msrest>=0.6.10", + "azure-core<2.0.0,>=1.0.0", + ], + extras_require={ + ":python_version<'3.0'": ['azure-nspkg'], + ":python_version<'3.4'": ['enum34>=1.0.4'], + ":python_version<'3.5'": ['typing'], + "async:python_version>='3.5'": [ + 'aiohttp>=3.0', + 'aiodns>=2.0' + ], + } +) \ No
newline at end of file diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md new file mode 100644 index 000000000000..7880fc364c91 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -0,0 +1,26 @@ +# Azure Queue Storage for Python + +> see https://aka.ms/autorest + + +### Generation +```ps +cd +autorest --v3 --python README.md +``` + +### Settings +```yaml +require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +output-folder: ../azure/media/lva/edge/_generated +namespace: azure.media.lva.edge +no-namespace-folders: true +license-header: MICROSOFT_MIT_NO_VERSION +enable-xml: false +vanilla: true +clear-output-folder: true +add-credentials: false +python: true +package-version: "1.0" +public-clients: false +``` diff --git a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json b/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json new file mode 100644 index 000000000000..36b206ca6142 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json @@ -0,0 +1,1239 @@ +{ + "swagger": "2.0", + "info": { + "description": "Direct Methods for Live Video Analytics on IoT Edge.", + "version": "1.0.4", + "title": "Direct Methods for Live Video Analytics on IoT Edge", + "contact": { + "email": "amshelp@microsoft.com" + } + }, + "security": [ + { + "sharedAccessSignature": [] + } + ], + "paths": {}, + "securityDefinitions": { + "sharedAccessSignature": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + }, + "definitions": { + "OperationBase": { + "type": "object", + "properties": { + "methodName": { + "type": "string", + "description": "method name", + "readOnly": true + }, + "@apiVersion": { + "type": "string", + "description": "api version", + "enum": [ + "1.0" + ], + "x-ms-enum": { + "name": "ApiVersionEnum", + "modelAsString": false + } + } + }, + "discriminator": "methodName" + }, + "MediaGraphTopologySetRequest": 
{ + "type": "object", + "x-ms-discriminator-value": "GraphTopologySet", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ], + "required": [ + "graph" + ], + "properties": { + "graph": { + "$ref": "#/definitions/MediaGraphTopology" + } + } + }, + "MediaGraphTopologySetRequestBody": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologySet", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + }, + { + "$ref": "#/definitions/MediaGraphTopology" + } + ] + }, + "MediaGraphInstanceSetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceSet", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ], + "required": [ + "instance" + ], + "properties": { + "instance": { + "$ref": "#/definitions/MediaGraphInstance" + } + } + }, + "ItemNonSetRequestBase": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ], + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "method name" + } + } + }, + "MediaGraphTopologyListRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyList", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ] + }, + "MediaGraphTopologyGetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyGet", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphTopologyDeleteRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyDelete", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceListRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceList", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ] + }, + "MediaGraphInstanceGetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceGet", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceActivateRequest": { + "type": 
"object", + "x-ms-discriminator-value": "GraphInstanceActivate", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceDeActivateRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceDeactivate", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceDeleteRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceDelete", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstance": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "name" + }, + "systemData": { + "$ref": "#/definitions/MediaGraphSystemData" + }, + "properties": { + "$ref": "#/definitions/MediaGraphInstanceProperties" + } + }, + "description": "Represents a Media Graph instance." + }, + "MediaGraphInstanceProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "An optional description for the instance." + }, + "topologyName": { + "type": "string", + "description": "The name of the graph topology that this instance will run. A topology with this name should already have been set in the Edge module." + }, + "parameters": { + "type": "array", + "description": "List of one or more graph instance parameters.", + "items": { + "$ref": "#/definitions/MediaGraphParameterDefinition" + } + }, + "state": { + "type": "string", + "description": "Allowed states for a graph Instance.", + "enum": [ + "Inactive", + "Activating", + "Active", + "Deactivating" + ], + "x-ms-enum": { + "name": "MediaGraphInstanceState", + "values": [ + { + "value": "Inactive", + "description": "Inactive state." + }, + { + "value": "Activating", + "description": "Activating state." + }, + { + "value": "Active", + "description": "Active state." + }, + { + "value": "Deactivating", + "description": "Deactivating state." 
+ } + ], + "modelAsString": false + } + } + }, + "description": "Properties of a Media Graph instance." + }, + "MediaGraphParameterDefinition": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of parameter as defined in the graph topology." + }, + "value": { + "type": "string", + "description": "Value of parameter." + } + }, + "description": "A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + }, + "MediaGraphInstanceCollection": { + "properties": { + "value": { + "type": "array", + "description": "Collection of graph instances.", + "items": { + "$ref": "#/definitions/MediaGraphInstance" + } + }, + "@continuationToken": { + "type": "string", + "description": "Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response)." + } + }, + "description": "Collection of graph instances." + }, + "MediaGraphTopologyCollection": { + "properties": { + "value": { + "type": "array", + "description": "Collection of graph topologies.", + "items": { + "$ref": "#/definitions/MediaGraphTopology" + } + }, + "@continuationToken": { + "type": "string", + "description": "Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response)." + } + }, + "description": "Collection of graph topologies." 
+ }, + "MediaGraphTopology": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "name" + }, + "systemData": { + "$ref": "#/definitions/MediaGraphSystemData" + }, + "properties": { + "$ref": "#/definitions/MediaGraphTopologyProperties" + } + }, + "description": "Describes a graph topology." + }, + "MediaGraphTopologyProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "An optional description for the instance." + }, + "parameters": { + "type": "array", + "description": "An optional description for the instance.", + "items": { + "$ref": "#/definitions/MediaGraphParameterDeclaration" + } + }, + "sources": { + "type": "array", + "description": "An optional description for the instance.", + "items": { + "$ref": "#/definitions/MediaGraphSource" + } + }, + "processors": { + "type": "array", + "description": "An optional description for the instance.", + "items": { + "$ref": "#/definitions/MediaGraphProcessor" + } + }, + "sinks": { + "description": "name", + "type": "array", + "items": { + "$ref": "#/definitions/MediaGraphSink" + } + } + }, + "description": "Describes the properties of a graph topology." + }, + "MediaGraphSystemData": { + "type": "object", + "properties": { + "createdAt": { + "type": "string", + "format": "date-time", + "description": "The timestamp of resource creation (UTC)." + }, + "lastModifiedAt": { + "type": "string", + "format": "date-time", + "description": "The timestamp of resource last modification (UTC)." + } + }, + "description": "Graph system data." 
+ }, + "MediaGraphParameterDeclaration": { + "type": "object", + "required": [ + "name", + "type" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the parameter.", + "maxLength": 64 + }, + "type": { + "type": "string", + "description": "name", + "enum": [ + "String", + "SecretString", + "Int", + "Double", + "Bool" + ], + "x-ms-enum": { + "name": "MediaGraphParameterType", + "values": [ + { + "value": "String", + "description": "A string parameter value." + }, + { + "value": "SecretString", + "description": "A string to hold sensitive information as parameter value." + }, + { + "value": "Int", + "description": "A 32-bit signed integer as parameter value." + }, + { + "value": "Double", + "description": "A 64-bit double-precision floating point type as parameter value." + }, + { + "value": "Bool", + "description": "A boolean value that is either true or false." + } + ], + "modelAsString": false + } + }, + "description": { + "type": "string", + "description": "Description of the parameter." + }, + "default": { + "type": "string", + "description": "The default value for the parameter, to be used if the graph instance does not specify a value." + } + }, + "description": "The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + }, + "MediaGraphSource": { + "type": "object", + "required": [ + "@type", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The type of the source node. The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "The name to be used for this source node." + } + }, + "description": "Media graph source." 
+ }, + "MediaGraphRtspSource": { + "properties": { + "transport": { + "type": "string", + "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", + "enum": [ + "Http", + "Tcp" + ], + "x-ms-enum": { + "name": "MediaGraphRtspTransport", + "values": [ + { + "value": "Http", + "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." + }, + { + "value": "Tcp", + "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." + } + ], + "modelAsString": true + } + }, + "endpoint": { + "description": "RTSP endpoint of the stream that is being connected to.", + "$ref": "#/definitions/MediaGraphEndpoint" + } + }, + "required": [ + "endpoint" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSource" + }, + {} + ], + "description": "Enables a graph to capture media from a RTSP server.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphRtspSource" + }, + "MediaGraphIoTHubMessageSource": { + "properties": { + "hubInputName": { + "type": "string", + "description": "Name of the input path where messages can be routed to (via routes declared in the IoT Edge deployment manifest)." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSource" + }, + {} + ], + "description": "Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSource" + }, + "MediaGraphIoTHubMessageSink": { + "properties": { + "hubOutputName": { + "type": "string", + "description": "Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." 
+ } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + }, + {} + ], + "description": "Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSink" + }, + "MediaGraphEndpoint": { + "type": "object", + "required": [ + "@type", + "url" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "credentials": { + "description": "Polymorphic credentials to be presented to the endpoint.", + "$ref": "#/definitions/MediaGraphCredentials" + }, + "url": { + "type": "string", + "description": "Url for the endpoint." + } + }, + "description": "Base class for endpoints." + }, + "MediaGraphCredentials": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Credentials to present during authentication." + }, + "MediaGraphUsernamePasswordCredentials": { + "properties": { + "username": { + "type": "string", + "description": "Username for a username/password pair." + }, + "password": { + "type": "string", + "description": "Password for a username/password pair." + } + }, + "required": [ + "username" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCredentials" + }, + {} + ], + "description": "Username/password credential pair.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials" + }, + "MediaGraphHttpHeaderCredentials": { + "properties": { + "headerName": { + "type": "string", + "description": "HTTP header name." + }, + "headerValue": { + "type": "string", + "description": "HTTP header value." 
+ } + }, + "required": [ + "headerName", + "headerValue" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCredentials" + }, + {} + ], + "description": "Http header service credentials.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpHeaderCredentials" + }, + "MediaGraphUnsecuredEndpoint": { + "allOf": [ + { + "$ref": "#/definitions/MediaGraphEndpoint" + }, + {} + ], + "description": "An endpoint that the media graph can connect to, with no encryption in transit.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUnsecuredEndpoint" + }, + "MediaGraphTlsEndpoint": { + "properties": { + "trustedCertificates": { + "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", + "$ref": "#/definitions/MediaGraphCertificateSource" + }, + "validationOptions": { + "description": "Validation options to use when authenticating a TLS connection. By default, strict validation is used.", + "$ref": "#/definitions/MediaGraphTlsValidationOptions" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphEndpoint" + }, + {} + ], + "description": "An endpoint that the graph can connect to, which must be connected over TLS/SSL.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphTlsEndpoint" + }, + "MediaGraphCertificateSource": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Base class for certificate sources." + }, + "MediaGraphTlsValidationOptions": { + "type": "object", + "properties": { + "ignoreHostname": { + "type": "string", + "description": "Boolean value ignoring the host name (common name) during validation." + }, + "ignoreSignature": { + "type": "string", + "description": "Boolean value ignoring the integrity of the certificate chain at the current time." 
+ } + }, + "description": "Options for controlling the authentication of TLS endpoints." + }, + "MediaGraphPemCertificateList": { + "properties": { + "certificates": { + "type": "array", + "description": "PEM formatted public certificates one per entry.", + "items": { + "type": "string" + } + } + }, + "required": [ + "certificates" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCertificateSource" + }, + {} + ], + "description": "A list of PEM formatted certificates.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphPemCertificateList" + }, + "MediaGraphSink": { + "type": "object", + "required": [ + "@type", + "inputs", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "Name to be used for the media graph sink." + }, + "inputs": { + "type": "array", + "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node.", + "items": { + "$ref": "#/definitions/MediaGraphNodeInput" + } + } + }, + "description": "Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module." + }, + "MediaGraphNodeInput": { + "type": "object", + "properties": { + "nodeName": { + "type": "string", + "description": "The name of another node in the media graph, the output of which is used as input to this node." + }, + "outputSelectors": { + "type": "array", + "description": "Allows for the selection of particular streams from another node.", + "items": { + "$ref": "#/definitions/MediaGraphOutputSelector" + } + } + }, + "description": "Represents the input to any node in a media graph." 
+ }, + "MediaGraphOutputSelector": { + "properties": { + "property": { + "type": "string", + "description": "The stream property to compare with.", + "enum": [ + "mediaType" + ], + "x-ms-enum": { + "name": "MediaGraphOutputSelectorProperty", + "values": [ + { + "value": "mediaType", + "description": "The stream's MIME type or subtype." + } + ], + "modelAsString": false + } + }, + "operator": { + "type": "string", + "description": "The operator to compare streams by.", + "enum": [ + "is", + "isNot" + ], + "x-ms-enum": { + "name": "MediaGraphOutputSelectorOperator", + "values": [ + { + "value": "is", + "description": "A media type is the same type or a subtype." + }, + { + "value": "isNot", + "description": "A media type is not the same type or a subtype." + } + ], + "modelAsString": false + } + }, + "value": { + "type": "string", + "description": "Value to compare against." + } + }, + "description": "Allows for the selection of particular streams from another node." + }, + "MediaGraphFileSink": { + "properties": { + "filePathPattern": { + "type": "string", + "description": "Absolute file path pattern for creating new files on the Edge device.", + "minLength": 1 + } + }, + "required": [ + "filePathPattern" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + }, + {} + ], + "description": "Enables a media graph to write/store media (video and audio) to a file on the Edge device.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFileSink" + }, + "MediaGraphAssetSink": { + "properties": { + "assetNamePattern": { + "type": "string", + "description": "A name pattern when creating new assets." + }, + "segmentLength": { + "type": "string", + "format": "duration", + "example": "PT30S", + "description": "When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." 
+ }, + "localMediaCachePath": { + "type": "string", + "description": "Path to a local file system directory for temporary caching of media, before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure." + }, + "localMediaCacheMaximumSizeMiB": { + "type": "string", + "description": "Maximum amount of disk space that can be used for temporary caching of media." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + }, + {} + ], + "description": "Enables a graph to record media to an Azure Media Services asset, for subsequent playback.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphAssetSink" + }, + "MediaGraphProcessor": { + "type": "object", + "required": [ + "@type", + "inputs", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "The name for this processor node." + }, + "inputs": { + "type": "array", + "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node.", + "items": { + "$ref": "#/definitions/MediaGraphNodeInput" + } + } + }, + "description": "A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output." + }, + "MediaGraphMotionDetectionProcessor": { + "properties": { + "sensitivity": { + "type": "string", + "description": "Enumeration that specifies the sensitivity of the motion detection processor.", + "enum": [ + "Low", + "Medium", + "High" + ], + "x-ms-enum": { + "name": "MediaGraphMotionDetectionSensitivity", + "values": [ + { + "value": "Low", + "description": "Low Sensitivity." + }, + { + "value": "Medium", + "description": "Medium Sensitivity." + }, + { + "value": "High", + "description": "High Sensitivity." 
+ } + ], + "modelAsString": true + } + }, + "outputMotionRegion": { + "type": "boolean", + "description": "Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphMotionDetectionProcessor" + }, + "MediaGraphExtensionProcessorBase": { + "properties": { + "endpoint": { + "description": "Endpoint to which this processor should connect.", + "$ref": "#/definitions/MediaGraphEndpoint" + }, + "image": { + "description": "Describes the parameters of the image that is sent as input to the endpoint.", + "$ref": "#/definitions/MediaGraphImage" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphExtensionProcessorBase" + }, + "MediaGraphCognitiveServicesVisionExtension": { + "properties": {}, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + } + ], + "description": "A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. 
Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension" + }, + "MediaGraphGrpcExtension": { + "required": [ + "dataTransfer" + ], + "properties": { + "dataTransfer": { + "description": "How media should be transferred to the inferencing engine.", + "$ref": "#/definitions/MediaGraphGrpcExtensionDataTransfer" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + }, + {} + ], + "description": "A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" + }, + "MediaGraphGrpcExtensionDataTransfer": { + "required": [ + "mode" + ], + "properties": { + "sharedMemorySizeMiB": { + "type": "string", + "description": "The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specified otherwise." + }, + "mode": { + "type": "string", + "description": "How frame data should be transmitted to the inferencing engine.", + "enum": [ + "Embedded", + "SharedMemory" + ], + "x-ms-enum": { + "name": "MediaGraphGrpcExtensionDataTransferMode", + "values": [ + { + "value": "Embedded", + "description": "Frames are transferred embedded into the gRPC messages." + }, + { + "value": "SharedMemory", + "description": "Frames are transferred through shared memory."
+ } + ], + "modelAsString": true + } + } + }, + "description": "Describes how media should be transferred to the inferencing engine.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" + }, + "MediaGraphHttpExtension": { + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + }, + {} + ], + "description": "A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpExtension" + }, + "MediaGraphImage": { + "type": "object", + "properties": { + "scale": { + "$ref": "#/definitions/MediaGraphImageScale" + }, + "format": { + "$ref": "#/definitions/MediaGraphImageFormat" + } + }, + "description": "Describes the properties of an image frame." + }, + "MediaGraphImageScale": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", + "enum": [ + "PreserveAspectRatio", + "Pad", + "Stretch" + ], + "x-ms-enum": { + "name": "MediaGraphImageScaleMode", + "values": [ + { + "value": "PreserveAspectRatio", + "description": "Use the same aspect ratio as the input frame." + }, + { + "value": "Pad", + "description": "Center pad the input frame to match the given dimensions." + }, + { + "value": "Stretch", + "description": "Stretch input frame to match given dimensions." + } + ], + "modelAsString": true + } + }, + "width": { + "type": "string", + "description": "The desired output width of the image." + }, + "height": { + "type": "string", + "description": "The desired output height of the image." + } + }, + "description": "The scaling mode for the image." 
+ }, + "MediaGraphImageFormat": { + "required": [ + "@type" + ], + "type": "object", + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Encoding settings for an image.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormat" + }, + "MediaGraphImageFormatRaw": { + "properties": { + "pixelFormat": { + "type": "string", + "description": "pixel format", + "enum": [ + "Yuv420p", + "Rgb565be", + "Rgb565le", + "Rgb555be", + "Rgb555le", + "Rgb24", + "Bgr24", + "Argb", + "Rgba", + "Abgr", + "Bgra" + ], + "x-ms-enum": { + "name": "MediaGraphImageFormatRawPixelFormat", + "values": [ + { + "value": "Yuv420p", + "description": "Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples)." + }, + { + "value": "Rgb565be", + "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian." + }, + { + "value": "Rgb565le", + "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian." + }, + { + "value": "Rgb555be", + "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined." + }, + { + "value": "Rgb555le", + "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined." + }, + { + "value": "Rgb24", + "description": "Packed RGB 8:8:8, 24bpp, RGBRGB." + }, + { + "value": "Bgr24", + "description": "Packed RGB 8:8:8, 24bpp, BGRBGR." + }, + { + "value": "Argb", + "description": "Packed ARGB 8:8:8:8, 32bpp, ARGBARGB." + }, + { + "value": "Rgba", + "description": "Packed RGBA 8:8:8:8, 32bpp, RGBARGBA." + }, + { + "value": "Abgr", + "description": "Packed ABGR 8:8:8:8, 32bpp, ABGRABGR." + }, + { + "value": "Bgra", + "description": "Packed BGRA 8:8:8:8, 32bpp, BGRABGRA." 
+ } + ], + "modelAsString": true + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + }, + {} + ], + "description": "Encoding settings for raw images.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatRaw" + }, + "MediaGraphImageFormatEncoded": { + "properties": { + "encoding": { + "type": "string", + "description": "The different encoding formats that can be used for the image.", + "default": "Jpeg", + "enum": [ + "Jpeg", + "Bmp", + "Png" + ], + "x-ms-enum": { + "name": "MediaGraphImageEncodingFormat", + "values": [ + { + "value": "Jpeg", + "description": "JPEG image format." + }, + { + "value": "Bmp", + "description": "BMP image format." + }, + { + "value": "Png", + "description": "PNG image format." + } + ], + "modelAsString": true + } + }, + "quality": { + "type": "string", + "description": "The image quality (used for JPEG only). Value must be between 0 to 100 (best quality)." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + }, + {} + ], + "description": "Allowed formats for the image.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatEncoded" + }, + "MediaGraphSignalGateProcessor": { + "properties": { + "activationEvaluationWindow": { + "type": "string", + "example": "PT1.0S", + "description": "The period of time over which the gate gathers input events, before evaluating them." + }, + "activationSignalOffset": { + "type": "string", + "example": "-PT1.0S", + "description": "Signal offset once the gate is activated (can be negative). It is an offset between the time the event is received, and the timestamp of the first media sample (eg. video frame) that is allowed through by the gate." + }, + "minimumActivationTime": { + "type": "string", + "example": "PT1S", + "description": "The minimum period for which the gate remains open, in the absence of subsequent triggers (events)." 
+ }, + "maximumActivationTime": { + "type": "string", + "example": "PT2S", + "description": "The maximum period for which the gate remains open, in the presence of subsequent events." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphSignalGateProcessor" + }, + "MediaGraphFrameRateFilterProcessor": { + "properties": { + "maximumFps": { + "type": "string", + "description": "Ensures that the frame rate of the video leaving this processor does not exceed this limit." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "Limits the frame rate on the input video stream based on the maximumFps property.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFrameRateFilterProcessor" + } + } +} diff --git a/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt b/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt new file mode 100644 index 000000000000..0290e6671f32 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt @@ -0,0 +1,158 @@ +AutoRest code generation utility [cli version: 3.0.6247; node: v12.16.1, max-memory: 2048 gb] +(C) 2018 Microsoft Corporation. +https://aka.ms/autorest +NOTE: AutoRest core version selected from configuration: 3.0.6302. + Loading AutoRest core 'C:\Users\hivyas\.autorest\@autorest_core@3.0.6302\node_modules\@autorest\core\dist' (3.0.6302) + Loading AutoRest extension '@autorest/python' (5.1.0-preview.7->5.1.0-preview.7) + Loading AutoRest extension '@autorest/modelerfour' (4.15.400->4.15.400) + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyListRequest' is using an 'allOf' instead of a $ref. 
This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceListRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphUnsecuredEndpoint' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphCognitiveServicesVisionExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphHttpExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. 
+ +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphInstanceCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTopologyCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphRtspSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUsernamePasswordCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpHeaderCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUnsecuredEndpoint' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTlsEndpoint' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphPemCertificateList' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphOutputSelector' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFileSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphAssetSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphMotionDetectionProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphExtensionProcessorBase' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphCognitiveServicesVisionExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtensionDataTransfer' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpExtension' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatRaw' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatEncoded' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphSignalGateProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFrameRateFilterProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphRtspSource' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSource' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSink' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphUsernamePasswordCredentials' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphHttpHeaderCredentials' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphTlsEndpoint' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphPemCertificateList' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFileSink' has an allOf list with an empty object schema as a parent, removing it. 
+ +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphAssetSink' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphMotionDetectionProcessor' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphExtensionProcessorBase' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphGrpcExtension' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatRaw' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatEncoded' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphSignalGateProcessor' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFrameRateFilterProcessor' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/CheckDuplicateSchemas): Checking for duplicate schemas, this could take a (long) while. Run with --verbose for more detail. + +WARNING (Modeler/MissingType): The schema 'components·109p5kc·schemas·mediagraphrtspsource·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·109p5kc·schemas·mediagraphrtspsource·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1' has no type or format information whatsoever. 
Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1' has no type or format information whatsoever. 
Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1nh92cj·schemas·mediagraphfilesink·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1nh92cj·schemas·mediagraphfilesink·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·z5bgs5·schemas·mediagraphassetsink·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·z5bgs5·schemas·mediagraphassetsink·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1' has no type or format information whatsoever. 
Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1k6pka5·schemas·mediagraphimageformatraw·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1k6pka5·schemas·mediagraphimageformatraw·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1 +Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? 
Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py new file mode 100644 index 000000000000..53b2dcb4ba92 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py @@ -0,0 +1,79 @@ +import asyncio +import functools +import os + +from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function +from devtools_testutils.azure_testcase import _is_autorest_v3 + +from .testcase import AppConfigTestCase + +class AsyncAppConfigTestCase(AppConfigTestCase): + def __init__(self, *args, **kwargs): + super(AppConfigTestCase, self).__init__(*args, **kwargs) + + class AsyncFakeCredential(object): + # fake async credential + async def get_token(self, *scopes, **kwargs): + return AccessToken('fake_token', 2527537086) + + async def close(self): + pass + + def create_basic_client(self, client_class, **kwargs): + # This is the patch for creating client using aio identity + + tenant_id = os.environ.get("AZURE_TENANT_ID", None) + client_id = os.environ.get("AZURE_CLIENT_ID", None) + secret = os.environ.get("AZURE_CLIENT_SECRET", None) + + if tenant_id and client_id and secret and self.is_live: + if _is_autorest_v3(client_class): + # Create azure-identity class using aio credential + from azure.identity.aio import ClientSecretCredential + credentials = ClientSecretCredential( + tenant_id=tenant_id, + client_id=client_id, + client_secret=secret + ) + else: + # Create msrestazure class + from msrestazure.azure_active_directory import ServicePrincipalCredentials + credentials = ServicePrincipalCredentials( + tenant=tenant_id, + client_id=client_id, + secret=secret + ) + else: + if _is_autorest_v3(client_class): + credentials = self.AsyncFakeCredential() + #credentials = self.settings.get_azure_core_credentials() + else: + credentials = 
self.settings.get_credentials() + + # Real client creation + # FIXME decide what is the final argument for that + # if self.is_playback(): + # kwargs.setdefault("polling_interval", 0) + if _is_autorest_v3(client_class): + kwargs.setdefault("logging_enable", True) + client = client_class( + credential=credentials, + **kwargs + ) + else: + client = client_class( + credentials=credentials, + **kwargs + ) + + if self.is_playback(): + try: + client._config.polling_interval = 0 # FIXME in azure-mgmt-core, make this a kwargs + except AttributeError: + pass + + if hasattr(client, "config"): # Autorest v2 + if self.is_playback(): + client.config.long_running_operation_timeout = 0 + client.config.enable_http_logger = True + return client diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py b/sdk/media/azure-media-lva-edge/tests/conftest.py new file mode 100644 index 000000000000..c36aaed14908 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/conftest.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-lva-edge/tests/test_app_config.py new file mode 100644 index 000000000000..5871ed8eef2f --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/test_app_config.py @@ -0,0 +1 @@ +import pytest diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 58a0d6292800..2d63019f2b80 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,3 +30,6 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia + - name: azure_media_lva_edge + safeName: azuremedialvaedge + From 114b57e532365e6b26a7d43dd783f72a9d8b5bcb Mon Sep 17 00:00:00 2001 From: hivyas Date: Tue, 17 Nov 2020 15:29:53 -0800 Subject: [PATCH 02/64] removing tests and hardcoded strings --- sdk/media/azure-media-lva-edge/MANIFEST.in | 1 - .../samples/sample_lva.py | 2 +- .../azure-media-lva-edge/swagger/README.md | 2 +- .../swagger/appconfiguration.json | 1239 ----------------- .../tests/_shared/asynctestcase.py | 79 -- .../tests/_shared/testcase.py | 0 .../azure-media-lva-edge/tests/conftest.py | 25 - .../tests/test_app_config.py | 1 - 8 files changed, 2 insertions(+), 1347 deletions(-) delete mode 100644 sdk/media/azure-media-lva-edge/swagger/appconfiguration.json delete mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/testcase.py delete mode 100644 
sdk/media/azure-media-lva-edge/tests/conftest.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/test_app_config.py diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-lva-edge/MANIFEST.in index 7ebdd947f8ff..4a340e3b7f85 100644 --- a/sdk/media/azure-media-lva-edge/MANIFEST.in +++ b/sdk/media/azure-media-lva-edge/MANIFEST.in @@ -1,4 +1,3 @@ -recursive-include tests *.py include *.md include azure/__init__.py recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-lva-edge/samples/sample_lva.py index 9ac9ca9a817a..9b5e91818af6 100644 --- a/sdk/media/azure-media-lva-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-lva-edge/samples/sample_lva.py @@ -8,7 +8,7 @@ device_id = "lva-sample-device" module_d = "lvaEdge" -connection_string = "HostName=lvasamplehub77xvrvtar2bpw.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=o77hgzsswnBZsaGKVSDjSmm53m4ViJb/s1xv9zfDCi0=" +connection_string = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING") graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index 7880fc364c91..e80c97ff0f3c 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -11,7 +11,7 @@ autorest --v3 --python README.md ### Settings ```yaml -require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated namespace: azure.media.lva.edge no-namespace-folders: true diff --git a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json b/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json deleted file mode 100644 index 36b206ca6142..000000000000 --- 
a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json +++ /dev/null @@ -1,1239 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "description": "Direct Methods for Live Video Analytics on IoT Edge.", - "version": "1.0.4", - "title": "Direct Methods for Live Video Analytics on IoT Edge", - "contact": { - "email": "amshelp@microsoft.com" - } - }, - "security": [ - { - "sharedAccessSignature": [] - } - ], - "paths": {}, - "securityDefinitions": { - "sharedAccessSignature": { - "type": "apiKey", - "name": "Authorization", - "in": "header" - } - }, - "definitions": { - "OperationBase": { - "type": "object", - "properties": { - "methodName": { - "type": "string", - "description": "method name", - "readOnly": true - }, - "@apiVersion": { - "type": "string", - "description": "api version", - "enum": [ - "1.0" - ], - "x-ms-enum": { - "name": "ApiVersionEnum", - "modelAsString": false - } - } - }, - "discriminator": "methodName" - }, - "MediaGraphTopologySetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologySet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "graph" - ], - "properties": { - "graph": { - "$ref": "#/definitions/MediaGraphTopology" - } - } - }, - "MediaGraphTopologySetRequestBody": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologySet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - }, - { - "$ref": "#/definitions/MediaGraphTopology" - } - ] - }, - "MediaGraphInstanceSetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceSet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "instance" - ], - "properties": { - "instance": { - "$ref": "#/definitions/MediaGraphInstance" - } - } - }, - "ItemNonSetRequestBase": { - "type": "object", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "method name" - } - } 
- }, - "MediaGraphTopologyListRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyList", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ] - }, - "MediaGraphTopologyGetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyGet", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphTopologyDeleteRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyDelete", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceListRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceList", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ] - }, - "MediaGraphInstanceGetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceGet", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceActivateRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceActivate", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceDeActivateRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceDeactivate", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceDeleteRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceDelete", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstance": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "name" - }, - "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" - }, - "properties": { - "$ref": "#/definitions/MediaGraphInstanceProperties" - } - }, - "description": "Represents a Media Graph instance." 
- }, - "MediaGraphInstanceProperties": { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "An optional description for the instance." - }, - "topologyName": { - "type": "string", - "description": "The name of the graph topology that this instance will run. A topology with this name should already have been set in the Edge module." - }, - "parameters": { - "type": "array", - "description": "List of one or more graph instance parameters.", - "items": { - "$ref": "#/definitions/MediaGraphParameterDefinition" - } - }, - "state": { - "type": "string", - "description": "Allowed states for a graph Instance.", - "enum": [ - "Inactive", - "Activating", - "Active", - "Deactivating" - ], - "x-ms-enum": { - "name": "MediaGraphInstanceState", - "values": [ - { - "value": "Inactive", - "description": "Inactive state." - }, - { - "value": "Activating", - "description": "Activating state." - }, - { - "value": "Active", - "description": "Active state." - }, - { - "value": "Deactivating", - "description": "Deactivating state." - } - ], - "modelAsString": false - } - } - }, - "description": "Properties of a Media Graph instance." - }, - "MediaGraphParameterDefinition": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string", - "description": "Name of parameter as defined in the graph topology." - }, - "value": { - "type": "string", - "description": "Value of parameter." - } - }, - "description": "A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." 
- }, - "MediaGraphInstanceCollection": { - "properties": { - "value": { - "type": "array", - "description": "Collection of graph instances.", - "items": { - "$ref": "#/definitions/MediaGraphInstance" - } - }, - "@continuationToken": { - "type": "string", - "description": "Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response)." - } - }, - "description": "Collection of graph instances." - }, - "MediaGraphTopologyCollection": { - "properties": { - "value": { - "type": "array", - "description": "Collection of graph topologies.", - "items": { - "$ref": "#/definitions/MediaGraphTopology" - } - }, - "@continuationToken": { - "type": "string", - "description": "Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response)." - } - }, - "description": "Collection of graph topologies." - }, - "MediaGraphTopology": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "name" - }, - "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" - }, - "properties": { - "$ref": "#/definitions/MediaGraphTopologyProperties" - } - }, - "description": "Describes a graph topology." - }, - "MediaGraphTopologyProperties": { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "An optional description for the instance." 
- }, - "parameters": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphParameterDeclaration" - } - }, - "sources": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphSource" - } - }, - "processors": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphProcessor" - } - }, - "sinks": { - "description": "name", - "type": "array", - "items": { - "$ref": "#/definitions/MediaGraphSink" - } - } - }, - "description": "Describes the properties of a graph topology." - }, - "MediaGraphSystemData": { - "type": "object", - "properties": { - "createdAt": { - "type": "string", - "format": "date-time", - "description": "The timestamp of resource creation (UTC)." - }, - "lastModifiedAt": { - "type": "string", - "format": "date-time", - "description": "The timestamp of resource last modification (UTC)." - } - }, - "description": "Graph system data." - }, - "MediaGraphParameterDeclaration": { - "type": "object", - "required": [ - "name", - "type" - ], - "properties": { - "name": { - "type": "string", - "description": "The name of the parameter.", - "maxLength": 64 - }, - "type": { - "type": "string", - "description": "name", - "enum": [ - "String", - "SecretString", - "Int", - "Double", - "Bool" - ], - "x-ms-enum": { - "name": "MediaGraphParameterType", - "values": [ - { - "value": "String", - "description": "A string parameter value." - }, - { - "value": "SecretString", - "description": "A string to hold sensitive information as parameter value." - }, - { - "value": "Int", - "description": "A 32-bit signed integer as parameter value." - }, - { - "value": "Double", - "description": "A 64-bit double-precision floating point type as parameter value." - }, - { - "value": "Bool", - "description": "A boolean value that is either true or false." 
- } - ], - "modelAsString": false - } - }, - "description": { - "type": "string", - "description": "Description of the parameter." - }, - "default": { - "type": "string", - "description": "The default value for the parameter, to be used if the graph instance does not specify a value." - } - }, - "description": "The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." - }, - "MediaGraphSource": { - "type": "object", - "required": [ - "@type", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The type of the source node. The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "The name to be used for this source node." - } - }, - "description": "Media graph source." - }, - "MediaGraphRtspSource": { - "properties": { - "transport": { - "type": "string", - "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", - "enum": [ - "Http", - "Tcp" - ], - "x-ms-enum": { - "name": "MediaGraphRtspTransport", - "values": [ - { - "value": "Http", - "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." - }, - { - "value": "Tcp", - "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." 
- } - ], - "modelAsString": true - } - }, - "endpoint": { - "description": "RTSP endpoint of the stream that is being connected to.", - "$ref": "#/definitions/MediaGraphEndpoint" - } - }, - "required": [ - "endpoint" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSource" - }, - {} - ], - "description": "Enables a graph to capture media from a RTSP server.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphRtspSource" - }, - "MediaGraphIoTHubMessageSource": { - "properties": { - "hubInputName": { - "type": "string", - "description": "Name of the input path where messages can be routed to (via routes declared in the IoT Edge deployment manifest)." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSource" - }, - {} - ], - "description": "Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSource" - }, - "MediaGraphIoTHubMessageSink": { - "properties": { - "hubOutputName": { - "type": "string", - "description": "Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSink" - }, - "MediaGraphEndpoint": { - "type": "object", - "required": [ - "@type", - "url" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." 
- }, - "credentials": { - "description": "Polymorphic credentials to be presented to the endpoint.", - "$ref": "#/definitions/MediaGraphCredentials" - }, - "url": { - "type": "string", - "description": "Url for the endpoint." - } - }, - "description": "Base class for endpoints." - }, - "MediaGraphCredentials": { - "type": "object", - "required": [ - "@type" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Credentials to present during authentication." - }, - "MediaGraphUsernamePasswordCredentials": { - "properties": { - "username": { - "type": "string", - "description": "Username for a username/password pair." - }, - "password": { - "type": "string", - "description": "Password for a username/password pair." - } - }, - "required": [ - "username" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCredentials" - }, - {} - ], - "description": "Username/password credential pair.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials" - }, - "MediaGraphHttpHeaderCredentials": { - "properties": { - "headerName": { - "type": "string", - "description": "HTTP header name." - }, - "headerValue": { - "type": "string", - "description": "HTTP header value." 
- } - }, - "required": [ - "headerName", - "headerValue" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCredentials" - }, - {} - ], - "description": "Http header service credentials.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpHeaderCredentials" - }, - "MediaGraphUnsecuredEndpoint": { - "allOf": [ - { - "$ref": "#/definitions/MediaGraphEndpoint" - }, - {} - ], - "description": "An endpoint that the media graph can connect to, with no encryption in transit.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUnsecuredEndpoint" - }, - "MediaGraphTlsEndpoint": { - "properties": { - "trustedCertificates": { - "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", - "$ref": "#/definitions/MediaGraphCertificateSource" - }, - "validationOptions": { - "description": "Validation options to use when authenticating a TLS connection. By default, strict validation is used.", - "$ref": "#/definitions/MediaGraphTlsValidationOptions" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphEndpoint" - }, - {} - ], - "description": "An endpoint that the graph can connect to, which must be connected over TLS/SSL.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphTlsEndpoint" - }, - "MediaGraphCertificateSource": { - "type": "object", - "required": [ - "@type" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Base class for certificate sources." - }, - "MediaGraphTlsValidationOptions": { - "type": "object", - "properties": { - "ignoreHostname": { - "type": "string", - "description": "Boolean value ignoring the host name (common name) during validation." - }, - "ignoreSignature": { - "type": "string", - "description": "Boolean value ignoring the integrity of the certificate chain at the current time." 
- } - }, - "description": "Options for controlling the authentication of TLS endpoints." - }, - "MediaGraphPemCertificateList": { - "properties": { - "certificates": { - "type": "array", - "description": "PEM formatted public certificates one per entry.", - "items": { - "type": "string" - } - } - }, - "required": [ - "certificates" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCertificateSource" - }, - {} - ], - "description": "A list of PEM formatted certificates.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphPemCertificateList" - }, - "MediaGraphSink": { - "type": "object", - "required": [ - "@type", - "inputs", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "Name to be used for the media graph sink." - }, - "inputs": { - "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node.", - "items": { - "$ref": "#/definitions/MediaGraphNodeInput" - } - } - }, - "description": "Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module." - }, - "MediaGraphNodeInput": { - "type": "object", - "properties": { - "nodeName": { - "type": "string", - "description": "The name of another node in the media graph, the output of which is used as input to this node." - }, - "outputSelectors": { - "type": "array", - "description": "Allows for the selection of particular streams from another node.", - "items": { - "$ref": "#/definitions/MediaGraphOutputSelector" - } - } - }, - "description": "Represents the input to any node in a media graph." 
- }, - "MediaGraphOutputSelector": { - "properties": { - "property": { - "type": "string", - "description": "The stream property to compare with.", - "enum": [ - "mediaType" - ], - "x-ms-enum": { - "name": "MediaGraphOutputSelectorProperty", - "values": [ - { - "value": "mediaType", - "description": "The stream's MIME type or subtype." - } - ], - "modelAsString": false - } - }, - "operator": { - "type": "string", - "description": "The operator to compare streams by.", - "enum": [ - "is", - "isNot" - ], - "x-ms-enum": { - "name": "MediaGraphOutputSelectorOperator", - "values": [ - { - "value": "is", - "description": "A media type is the same type or a subtype." - }, - { - "value": "isNot", - "description": "A media type is not the same type or a subtype." - } - ], - "modelAsString": false - } - }, - "value": { - "type": "string", - "description": "Value to compare against." - } - }, - "description": "Allows for the selection of particular streams from another node." - }, - "MediaGraphFileSink": { - "properties": { - "filePathPattern": { - "type": "string", - "description": "Absolute file path pattern for creating new files on the Edge device.", - "minLength": 1 - } - }, - "required": [ - "filePathPattern" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a media graph to write/store media (video and audio) to a file on the Edge device.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFileSink" - }, - "MediaGraphAssetSink": { - "properties": { - "assetNamePattern": { - "type": "string", - "description": "A name pattern when creating new assets." - }, - "segmentLength": { - "type": "string", - "format": "duration", - "example": "PT30S", - "description": "When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." 
- }, - "localMediaCachePath": { - "type": "string", - "description": "Path to a local file system directory for temporary caching of media, before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure." - }, - "localMediaCacheMaximumSizeMiB": { - "type": "string", - "description": "Maximum amount of disk space that can be used for temporary caching of media." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a graph to record media to an Azure Media Services asset, for subsequent playback.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphAssetSink" - }, - "MediaGraphProcessor": { - "type": "object", - "required": [ - "@type", - "inputs", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "The name for this processor node." - }, - "inputs": { - "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node.", - "items": { - "$ref": "#/definitions/MediaGraphNodeInput" - } - } - }, - "description": "A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output." - }, - "MediaGraphMotionDetectionProcessor": { - "properties": { - "sensitivity": { - "type": "string", - "description": "Enumeration that specifies the sensitivity of the motion detection processor.", - "enum": [ - "Low", - "Medium", - "High" - ], - "x-ms-enum": { - "name": "MediaGraphMotionDetectionSensitivity", - "values": [ - { - "value": "Low", - "description": "Low Sensitivity." - }, - { - "value": "Medium", - "description": "Medium Sensitivity." - }, - { - "value": "High", - "description": "High Sensitivity." 
- } - ], - "modelAsString": true - } - }, - "outputMotionRegion": { - "type": "boolean", - "description": "Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphMotionDetectionProcessor" - }, - "MediaGraphExtensionProcessorBase": { - "properties": { - "endpoint": { - "description": "Endpoint to which this processor should connect.", - "$ref": "#/definitions/MediaGraphEndpoint" - }, - "image": { - "description": "Describes the parameters of the image that is sent as input to the endpoint.", - "$ref": "#/definitions/MediaGraphImage" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphExtensionProcessorBase" - }, - "MediaGraphCognitiveServicesVisionExtension": { - "properties": {}, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - } - ], - "description": "A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. 
Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension" - }, - "MediaGraphGrpcExtension": { - "required": [ - "dataTransfer" - ], - "properties": { - "dataTransfer": { - "description": "How media should be transferred to the inferencing engine.", - "$ref": "#/definitions/MediaGraphGrpcExtensionDataTransfer" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - }, - {} - ], - "description": "A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" - }, - "MediaGraphGrpcExtensionDataTransfer": { - "required": [ - "mode" - ], - "properties": { - "sharedMemorySizeMiB": { - "type": "string", - "description": "The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specificed otherwise." - }, - "mode": { - "type": "string", - "description": "How frame data should be transmitted to the inferencing engine.", - "enum": [ - "Embedded", - "SharedMemory" - ], - "x-ms-enum": { - "name": "MediaGraphGrpcExtensionDataTransferMode", - "values": [ - { - "value": "Embedded", - "description": "Frames are transferred embedded into the gRPC messages." - }, - { - "value": "SharedMemory", - "description": "Frames are transferred through shared memory." 
- } - ], - "modelAsString": true - } - } - }, - "description": "Describes how media should be transferred to the inferencing engine.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" - }, - "MediaGraphHttpExtension": { - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - }, - {} - ], - "description": "A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpExtension" - }, - "MediaGraphImage": { - "type": "object", - "properties": { - "scale": { - "$ref": "#/definitions/MediaGraphImageScale" - }, - "format": { - "$ref": "#/definitions/MediaGraphImageFormat" - } - }, - "description": "Describes the properties of an image frame." - }, - "MediaGraphImageScale": { - "type": "object", - "properties": { - "mode": { - "type": "string", - "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", - "enum": [ - "PreserveAspectRatio", - "Pad", - "Stretch" - ], - "x-ms-enum": { - "name": "MediaGraphImageScaleMode", - "values": [ - { - "value": "PreserveAspectRatio", - "description": "Use the same aspect ratio as the input frame." - }, - { - "value": "Pad", - "description": "Center pad the input frame to match the given dimensions." - }, - { - "value": "Stretch", - "description": "Stretch input frame to match given dimensions." - } - ], - "modelAsString": true - } - }, - "width": { - "type": "string", - "description": "The desired output width of the image." - }, - "height": { - "type": "string", - "description": "The desired output height of the image." - } - }, - "description": "The scaling mode for the image." 
- }, - "MediaGraphImageFormat": { - "required": [ - "@type" - ], - "type": "object", - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Encoding settings for an image.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormat" - }, - "MediaGraphImageFormatRaw": { - "properties": { - "pixelFormat": { - "type": "string", - "description": "pixel format", - "enum": [ - "Yuv420p", - "Rgb565be", - "Rgb565le", - "Rgb555be", - "Rgb555le", - "Rgb24", - "Bgr24", - "Argb", - "Rgba", - "Abgr", - "Bgra" - ], - "x-ms-enum": { - "name": "MediaGraphImageFormatRawPixelFormat", - "values": [ - { - "value": "Yuv420p", - "description": "Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples)." - }, - { - "value": "Rgb565be", - "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian." - }, - { - "value": "Rgb565le", - "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian." - }, - { - "value": "Rgb555be", - "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined." - }, - { - "value": "Rgb555le", - "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined." - }, - { - "value": "Rgb24", - "description": "Packed RGB 8:8:8, 24bpp, RGBRGB." - }, - { - "value": "Bgr24", - "description": "Packed RGB 8:8:8, 24bpp, BGRBGR." - }, - { - "value": "Argb", - "description": "Packed ARGB 8:8:8:8, 32bpp, ARGBARGB." - }, - { - "value": "Rgba", - "description": "Packed RGBA 8:8:8:8, 32bpp, RGBARGBA." - }, - { - "value": "Abgr", - "description": "Packed ABGR 8:8:8:8, 32bpp, ABGRABGR." - }, - { - "value": "Bgra", - "description": "Packed BGRA 8:8:8:8, 32bpp, BGRABGRA." 
- } - ], - "modelAsString": true - } - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphImageFormat" - }, - {} - ], - "description": "Encoding settings for raw images.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatRaw" - }, - "MediaGraphImageFormatEncoded": { - "properties": { - "encoding": { - "type": "string", - "description": "The different encoding formats that can be used for the image.", - "default": "Jpeg", - "enum": [ - "Jpeg", - "Bmp", - "Png" - ], - "x-ms-enum": { - "name": "MediaGraphImageEncodingFormat", - "values": [ - { - "value": "Jpeg", - "description": "JPEG image format." - }, - { - "value": "Bmp", - "description": "BMP image format." - }, - { - "value": "Png", - "description": "PNG image format." - } - ], - "modelAsString": true - } - }, - "quality": { - "type": "string", - "description": "The image quality (used for JPEG only). Value must be between 0 to 100 (best quality)." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphImageFormat" - }, - {} - ], - "description": "Allowed formats for the image.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatEncoded" - }, - "MediaGraphSignalGateProcessor": { - "properties": { - "activationEvaluationWindow": { - "type": "string", - "example": "PT1.0S", - "description": "The period of time over which the gate gathers input events, before evaluating them." - }, - "activationSignalOffset": { - "type": "string", - "example": "-PT1.0S", - "description": "Signal offset once the gate is activated (can be negative). It is an offset between the time the event is received, and the timestamp of the first media sample (eg. video frame) that is allowed through by the gate." - }, - "minimumActivationTime": { - "type": "string", - "example": "PT1S", - "description": "The minimum period for which the gate remains open, in the absence of subsequent triggers (events)." 
- }, - "maximumActivationTime": { - "type": "string", - "example": "PT2S", - "description": "The maximum period for which the gate remains open, in the presence of subsequent events." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphSignalGateProcessor" - }, - "MediaGraphFrameRateFilterProcessor": { - "properties": { - "maximumFps": { - "type": "string", - "description": "Ensures that the frame rate of the video leaving this processor does not exceed this limit." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "Limits the frame rate on the input video stream based on the maximumFps property.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFrameRateFilterProcessor" - } - } -} diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py deleted file mode 100644 index 53b2dcb4ba92..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py +++ /dev/null @@ -1,79 +0,0 @@ -import asyncio -import functools -import os - -from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function -from devtools_testutils.azure_testcase import _is_autorest_v3 - -from .testcase import AppConfigTestCase - -class AsyncAppConfigTestCase(AppConfigTestCase): - def __init__(self, *args, **kwargs): - super(AppConfigTestCase, self).__init__(*args, **kwargs) - - class AsyncFakeCredential(object): - # fake async credential - async def get_token(self, *scopes, **kwargs): - return AccessToken('fake_token', 2527537086) - - async def close(self): - pass - - def create_basic_client(self, client_class, 
**kwargs): - # This is the patch for creating client using aio identity - - tenant_id = os.environ.get("AZURE_TENANT_ID", None) - client_id = os.environ.get("AZURE_CLIENT_ID", None) - secret = os.environ.get("AZURE_CLIENT_SECRET", None) - - if tenant_id and client_id and secret and self.is_live: - if _is_autorest_v3(client_class): - # Create azure-identity class using aio credential - from azure.identity.aio import ClientSecretCredential - credentials = ClientSecretCredential( - tenant_id=tenant_id, - client_id=client_id, - client_secret=secret - ) - else: - # Create msrestazure class - from msrestazure.azure_active_directory import ServicePrincipalCredentials - credentials = ServicePrincipalCredentials( - tenant=tenant_id, - client_id=client_id, - secret=secret - ) - else: - if _is_autorest_v3(client_class): - credentials = self.AsyncFakeCredential() - #credentials = self.settings.get_azure_core_credentials() - else: - credentials = self.settings.get_credentials() - - # Real client creation - # FIXME decide what is the final argument for that - # if self.is_playback(): - # kwargs.setdefault("polling_interval", 0) - if _is_autorest_v3(client_class): - kwargs.setdefault("logging_enable", True) - client = client_class( - credential=credentials, - **kwargs - ) - else: - client = client_class( - credentials=credentials, - **kwargs - ) - - if self.is_playback(): - try: - client._config.polling_interval = 0 # FIXME in azure-mgmt-core, make this a kwargs - except AttributeError: - pass - - if hasattr(client, "config"): # Autorest v2 - if self.is_playback(): - client.config.long_running_operation_timeout = 0 - client.config.enable_http_logger = True - return client diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py b/sdk/media/azure-media-lva-edge/tests/conftest.py deleted file 
mode 100644 index c36aaed14908..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/conftest.py +++ /dev/null @@ -1,25 +0,0 @@ -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# -# -------------------------------------------------------------------------- diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-lva-edge/tests/test_app_config.py deleted file mode 100644 index 5871ed8eef2f..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/test_app_config.py +++ /dev/null @@ -1 +0,0 @@ -import pytest From d5c57301b23b477b57230905b72c79bd0a45822c Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 18 Nov 2020 15:38:51 -0800 Subject: [PATCH 03/64] fixed tox errors --- .../azure-media-lva-edge/azure/__init__.py | 2 +- .../azure/media/__init__.py | 1 + .../azure/media/lva/__init__.py | 1 + .../azure/media/lva/edge/__init__.py | 25 ++++++++++--------- .../azure-media-lva-edge/swagger/README.md | 4 +-- .../tests/test_app_config.py | 5 ++++ 6 files changed, 23 insertions(+), 15 deletions(-) create mode 100644 sdk/media/azure-media-lva-edge/azure/media/__init__.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py create mode 100644 sdk/media/azure-media-lva-edge/tests/test_app_config.py diff --git a/sdk/media/azure-media-lva-edge/azure/__init__.py b/sdk/media/azure-media-lva-edge/azure/__init__.py index 0e40e134bdac..e7590fb185e8 100644 --- a/sdk/media/azure-media-lva-edge/azure/__init__.py +++ b/sdk/media/azure-media-lva-edge/azure/__init__.py @@ -4,4 +4,4 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------- -__path__ = __import__("pkgutil").extend_path(__path__, __name__) \ No newline at end of file +__path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-lva-edge/azure/media/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/__init__.py new file mode 100644 index 000000000000..69e3be50dac4 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py new file mode 100644 index 000000000000..69e3be50dac4 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py index 725cd6860541..2a9c3cc68e52 100644 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py @@ -1,20 +1,21 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -from azure.media.lva.edge._generated.models import MediaGraphTopologySetRequestBody, MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody +from azure.media.lva.edge._generated.models import (MediaGraphTopologySetRequestBody, +MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) def _OverrideTopologySetRequestSerialize(self): - graph_body = MediaGraphTopologySetRequestBody(name=self.graph.name) - graph_body.system_data = self.graph.system_data - graph_body.properties = self.graph.properties - - return graph_body.serialize() + graph_body = 
MediaGraphTopologySetRequestBody(name=self.graph.name) + graph_body.system_data = self.graph.system_data + graph_body.properties = self.graph.properties + + return graph_body.serialize() MediaGraphTopologySetRequest.serialize = _OverrideTopologySetRequestSerialize def _OverrideInstanceSetRequestSerialize(self): - graph_body = MediaGraphInstanceSetRequestBody(name=self.instance.name) - graph_body.system_data = self.instance.system_data - graph_body.properties = self.instance.properties - - return graph_body.serialize() + graph_body = MediaGraphInstanceSetRequestBody(name=self.instance.name) + graph_body.system_data = self.instance.system_data + graph_body.properties = self.instance.properties + + return graph_body.serialize() -MediaGraphInstanceSetRequest.serialize = _OverrideInstanceSetRequestSerialize \ No newline at end of file +MediaGraphInstanceSetRequest.serialize = _OverrideInstanceSetRequestSerialize diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index e80c97ff0f3c..ff8338377dc3 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -3,13 +3,13 @@ > see https://aka.ms/autorest -### Generation +## Generation ```ps cd autorest --v3 --python README.md ``` -### Settings +## Settings ```yaml require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-lva-edge/tests/test_app_config.py new file mode 100644 index 000000000000..57f0ccfa146f --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/test_app_config.py @@ -0,0 +1,5 @@ +import pytest + +class TestAppConfig(): + def test_something(self): + assert 1 \ No newline at end of file From 3badfa8e967ea135d384535009c06497e4e3e9ef Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 18 Nov 2020 16:04:26 -0800 Subject: 
[PATCH 04/64] modifying readme to pass tests --- sdk/media/azure-media-lva-edge/README.md | 6 +++++- sdk/media/azure-media-lva-edge/swagger/README.md | 9 ++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index c5012d4038c9..42f3c2d52227 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -6,7 +6,7 @@ Modern programs, especially programs running in a cloud, generally have many com Use the client library for App Configuration to create and manage application configuration settings. -## Prerequisites +## Getting started * Python 2.7, or 3.5 or later is required to use this package. * You need an [Azure subscription][azure_sub], and a [Configuration Store][configuration_store] to use this package. @@ -18,8 +18,12 @@ After that, create the Configuration Store: ```Powershell az appconfig create --name --resource-group --location eastus ``` +## Key Concepts +## Examples +## Troubleshooting +## Next Steps ## Contributing This project welcomes contributions and suggestions. 
Most contributions require diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index ff8338377dc3..9bd11368b134 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -3,13 +3,20 @@ > see https://aka.ms/autorest -## Generation +## Getting started ```ps cd autorest --v3 --python README.md ``` +## Key Concepts +## Examples + +## Troubleshooting + +## Next Steps ## Settings + ```yaml require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated From 8b91e11081e4b487431242e8b5cb0be0effe15c6 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 19 Nov 2020 14:50:23 -0800 Subject: [PATCH 05/64] fixed missing tests error --- sdk/media/azure-media-lva-edge/MANIFEST.in | 1 + .../azure-media-lva-edge/dev_requirements.txt | 2 +- .../azure-media-lva-edge/tests/conftest.py | 25 +++++++++++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 sdk/media/azure-media-lva-edge/tests/conftest.py diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-lva-edge/MANIFEST.in index 4a340e3b7f85..7ebdd947f8ff 100644 --- a/sdk/media/azure-media-lva-edge/MANIFEST.in +++ b/sdk/media/azure-media-lva-edge/MANIFEST.in @@ -1,3 +1,4 @@ +recursive-include tests *.py include *.md include azure/__init__.py recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt index 08bcfb306787..1d971eca1249 100644 --- a/sdk/media/azure-media-lva-edge/dev_requirements.txt +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -5,7 +5,7 @@ aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' msrest>=0.6.10 -pytest==5.4.2 +pytest>=4.6.9 tox>=3.20.0 tox-monorepo>=0.1.2 pytest-asyncio==0.12.0 diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py 
b/sdk/media/azure-media-lva-edge/tests/conftest.py new file mode 100644 index 000000000000..c36aaed14908 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/conftest.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +# -------------------------------------------------------------------------- From 2419d1b870d7cad2420ce2031419743d4b38e41e Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 19 Nov 2020 15:06:09 -0800 Subject: [PATCH 06/64] fixing dev requirments --- sdk/media/azure-media-lva-edge/README.md | 6 ++++-- .../azure-media-lva-edge/dev_requirements.txt | 1 - sdk/media/azure-media-lva-edge/swagger/README.md | 14 ++++++++------ 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 42f3c2d52227..1d7f3a425d64 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -19,11 +19,13 @@ After that, create the Configuration Store: az appconfig create --name --resource-group --location eastus ``` ## Key Concepts +sample ## Examples - +sample ## Troubleshooting - +sample ## Next Steps +sample ## Contributing This project welcomes contributions and suggestions. 
Most contributions require diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt index 1d971eca1249..cca01aec8af4 100644 --- a/sdk/media/azure-media-lva-edge/dev_requirements.txt +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -5,7 +5,6 @@ aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' msrest>=0.6.10 -pytest>=4.6.9 tox>=3.20.0 tox-monorepo>=0.1.2 pytest-asyncio==0.12.0 diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index 9bd11368b134..27ae17c26693 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -1,7 +1,5 @@ # Azure Queue Storage for Python - -> see https://aka.ms/autorest - +see `https://aka.ms/autorest` ## Getting started ```ps @@ -9,12 +7,16 @@ cd autorest --v3 --python README.md ``` ## Key Concepts - +sample ## Examples - +sample ## Troubleshooting - +sample ## Next Steps +sample + +## Contributing +sample ## Settings ```yaml From 34cd455158585ffa23abf5a0be06dce7c76b509c Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 19 Nov 2020 15:27:35 -0800 Subject: [PATCH 07/64] fixing readme and dev requirment --- sdk/media/azure-media-lva-edge/README.md | 2 ++ sdk/media/azure-media-lva-edge/dev_requirements.txt | 3 +-- sdk/media/azure-media-lva-edge/swagger/README.md | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 1d7f3a425d64..0b3e33a1f15e 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -19,12 +19,14 @@ After that, create the Configuration Store: az appconfig create --name --resource-group --location eastus ``` ## Key Concepts + sample ## Examples sample ## Troubleshooting sample ## Next Steps + sample ## Contributing diff --git 
a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt index cca01aec8af4..c3cf063e6b31 100644 --- a/sdk/media/azure-media-lva-edge/dev_requirements.txt +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -6,5 +6,4 @@ aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' msrest>=0.6.10 tox>=3.20.0 -tox-monorepo>=0.1.2 -pytest-asyncio==0.12.0 +tox-monorepo>=0.1.2 \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index 27ae17c26693..de1a5c4080de 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -1,4 +1,5 @@ # Azure Queue Storage for Python + see `https://aka.ms/autorest` ## Getting started @@ -7,12 +8,14 @@ cd autorest --v3 --python README.md ``` ## Key Concepts + sample ## Examples sample ## Troubleshooting sample ## Next Steps + sample ## Contributing From 637974d1224a1426f339499c31a780bc30feff15 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 20 Nov 2020 12:00:14 -0800 Subject: [PATCH 08/64] renaming readme file --- .../swagger/{README.md => autorest.md} | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) rename sdk/media/azure-media-lva-edge/swagger/{README.md => autorest.md} (79%) diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/autorest.md similarity index 79% rename from sdk/media/azure-media-lva-edge/swagger/README.md rename to sdk/media/azure-media-lva-edge/swagger/autorest.md index de1a5c4080de..1dcac1fff73e 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/autorest.md @@ -5,21 +5,8 @@ see `https://aka.ms/autorest` ## Getting started ```ps cd -autorest --v3 --python README.md +autorest --v3 --python autorest.md ``` -## Key Concepts - -sample -## Examples -sample -## Troubleshooting -sample 
-## Next Steps - -sample - -## Contributing -sample ## Settings ```yaml From 99a9c276f5479435d2922f6640ba88a26a1e5ab2 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 20 Nov 2020 12:03:29 -0800 Subject: [PATCH 09/64] fixed casing in read me file --- sdk/media/azure-media-lva-edge/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 0b3e33a1f15e..11728924e8a7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -18,14 +18,14 @@ After that, create the Configuration Store: ```Powershell az appconfig create --name --resource-group --location eastus ``` -## Key Concepts +## Key concepts sample ## Examples sample ## Troubleshooting sample -## Next Steps +## Next steps sample ## Contributing From 86ec46b1a8b263498fe78fae0fe67624d5aa2e25 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 20 Nov 2020 12:25:12 -0800 Subject: [PATCH 10/64] fixing dependency requirments --- sdk/media/azure-media-lva-edge/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-lva-edge/setup.py index d4a8c12edcc6..a4bfc61f9c6f 100644 --- a/sdk/media/azure-media-lva-edge/setup.py +++ b/sdk/media/azure-media-lva-edge/setup.py @@ -87,8 +87,8 @@ zip_safe=False, packages=find_packages(exclude=exclude_packages), install_requires=[ - "msrest>=0.6.10", - "azure-core<2.0.0,>=1.0.0", + "msrest>=0.5.0", + "azure-core<2.0.0,>=1.2.2", ], extras_require={ ":python_version<'3.0'": ['azure-nspkg'], From ca8ce7da315f14b5ddeb1f357667c3c705e6675e Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 08:23:23 -0800 Subject: [PATCH 11/64] updating README file --- sdk/media/azure-media-lva-edge/README.md | 71 +++++++++++++++---- .../azure-media-lva-edge/docs/DevTips.md | 40 +++++++++++ .../azure-media-lva-edge/swagger/autorest.md | 2 +- 3 files changed, 97 
insertions(+), 16 deletions(-) create mode 100644 sdk/media/azure-media-lva-edge/docs/DevTips.md diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 11728924e8a7..d2f2534482f7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -1,33 +1,53 @@ -# Azure App Configuration client library for Python SDK Training +# Azure Live Video Analytics for IoT Edge client library for Python -Azure App Configuration is a managed service that helps developers centralize their application configurations simply and securely. +Live Video Analytics on IoT Edge provides a platform to build intelligent video applications that span the edge and the cloud. The platform offers the capability to capture, record, and analyze live video along with publishing the results, video and video analytics, to Azure services in the cloud or the edge. It is designed to be an extensible platform, enabling you to connect different video analysis edge modules (such as Cognitive services containers, custom edge modules built by you with open-source machine learning models or custom models trained with your own data) to it and use them to analyze live video without worrying about the complexity of building and running a live video pipeline. -Modern programs, especially programs running in a cloud, generally have many components that are distributed in nature. Spreading configuration settings across these components can lead to hard-to-troubleshoot errors during an application deployment. Use App Configuration to securely store all the settings for your application in one place. +Use the client library for Live Video Analytics on IoT Edge to: -Use the client library for App Configuration to create and manage application configuration settings. 
+- simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) +- programatically construct media graph topologies and instances -## Getting started +[Package (PyPi)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] -* Python 2.7, or 3.5 or later is required to use this package. -* You need an [Azure subscription][azure_sub], and a [Configuration Store][configuration_store] to use this package. +## Getting started -To create a Configuration Store, you can use the Azure Portal or [Azure CLI][azure_cli]. +### Install the package -After that, create the Configuration Store: +Install the Live Video Analytics client library for Python with pip: -```Powershell -az appconfig create --name --resource-group --location eastus +```bash +pip install azure-lva-edge ``` +### Prerequisites + +* Python 2.7, or 3.5 or later is required to use this package. +* You need an [Azure subscription][azure_sub], and a [IOT device connection string][iot_device_connection_string] to use this package. + + ## Key concepts -sample +### Graph Topology vs Graph Instance +A graph topology is essentially the blueprint or template of a graph. It defines the parameters of the graph using placeholders as values for them. A graph instance references a graph topology and specifies the parameters. This way you are able to have multiple graph instances referencing the same topology but with different values for parameters. For more information please visit [Media graph topologies and instances][doc_media_graph] + +### CloudToDeviceMethod + +The `CloudToDeviceMethod` is part of the azure-iot-hub sdk. This method allows you to communicate one way notifications to a device in your iot hub. In our case we want to communicate various graph methods such as `MediaGraphTopologySetRequest` and `MediaGraphTopologyGetRequest`. 
To use `CloudToDeviceMethod` you need to pass in two parameters: `method_name` and `payload`. `method_name` should be the name of the media graph request you are sending. Each media graph request has a property called `method_name`. For example, `MediaGraphTopologySetRequest.method_name`. For the second parameter `payload` send the entire serialization of the media graph request. For example, `MediaGraphTopologySetRequest.serialize()` + ## Examples -sample + +[Samples][samples] + ## Troubleshooting -sample + +- When sending a method request using the IoT Hub's `CloudToDeviceMethod` remember to not type in the method request name directly. Instead use `[MethodRequestName.method_name]` +- Make sure to serialize the entire method request before passing it to `CloudToDeviceMethod` + ## Next steps -sample +- [Samples][samples] +- [Azure IoT Device SDK][iot-device-sdk] +- [Azure IoTHub Service SDK][iot-hub-sdk] + ## Contributing This project welcomes contributions and suggestions. Most contributions require @@ -44,3 +64,24 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments. 
+ + +[azure_cli]: https://docs.microsoft.com/cli/azure +[azure_sub]: https://azure.microsoft.com/free/ + +[cla]: https://cla.microsoft.com +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com + +[package]: TODO +[source]: TODO://link-to-path-in-the-SDK-repo +[samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python + +[doc_direct_methods]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/direct-methods +[doc_media_graph]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/media-graph-concept#media-graph-topologies-and-instances +[doc_product]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/ + +[iot-device-sdk]: https://pypi.org/project/azure-iot-device/ +[iot-hub-sdk]: https://pypi.org/project/azure-iot-hub/ +[iot_device_connection_string]: https://docs.microsoft.com/en-us/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/docs/DevTips.md b/sdk/media/azure-media-lva-edge/docs/DevTips.md new file mode 100644 index 000000000000..f6dbf9e1277c --- /dev/null +++ b/sdk/media/azure-media-lva-edge/docs/DevTips.md @@ -0,0 +1,40 @@ +## How to update the lva sdk + +1. Clone the latest swagger onto your local machine +2. Replace the `require` field inside of `autorest.md` to point to your local swagger file +3. Generate the sdk using the autorest command which can be found inside the `autorest.md` file +4. Add any customization functions inside of `sdk\media\azure-media-lva-edge\azure\media\lva\edge\__init__.py`. Make sure the customization functions are outside of the `_generated` folder. +5. Update the README file and Changelog with the latest version number +6. 
Submit a PR + +## Running tox locally + +Tox is the testing and virtual environment management tool that is used to verify our sdk will be installed correctly with different Python versions and interpreters. To run tox follow these instructions + +``` +pip install tox tox-monorepo +cd path/to/target/folder +tox -c eng/tox/tox.ini +``` +To run a specific tox command from your directory use the following commands: +```bash +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sphinx +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e lint +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e mypy +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e whl +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sdist +``` +A quick description of the five commands above: +* sphinx: documentation generation using the inline comments written in our code +* lint: runs pylint to make sure our code adheres to the style guidance +* mypy: runs the mypy static type checker for Python to make sure that our types are valid +* whl: creates a whl package for installing our package +* sdist: creates a zipped distribution of our files that the end user could install with pip + + +### Troubleshooting tox errors + +- Tox will complain if there are no tests. Add a dummy test in case you need to bypass this +- Make sure there is an `__init__.py` file inside of every directory inside of `azure` (Example: `azure/media` should have an __init__.py file) +- Follow the ReadMe guidelines outlined here: https://review.docs.microsoft.com/en-us/help/contribute-ref/contribute-ref-how-to-document-sdk?branch=master#readme. ReadMe titles are case SENSITIVE and use sentence casing. +- Make sure MANIFEST.in includes all required folders. 
(Most likely the required folders will be tests, samples, and the generated folder) diff --git a/sdk/media/azure-media-lva-edge/swagger/autorest.md b/sdk/media/azure-media-lva-edge/swagger/autorest.md index 1dcac1fff73e..48618fb331ed 100644 --- a/sdk/media/azure-media-lva-edge/swagger/autorest.md +++ b/sdk/media/azure-media-lva-edge/swagger/autorest.md @@ -5,7 +5,7 @@ see `https://aka.ms/autorest` ## Getting started ```ps cd -autorest --v3 --python autorest.md +autorest --v3 --python ``` ## Settings From ca812fc825bc98710ea92427c14e0a8aeaa9b629 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 08:48:49 -0800 Subject: [PATCH 12/64] fixed broken links --- sdk/media/azure-media-lva-edge/README.md | 4 ++-- sdk/media/azure-media-lva-edge/docs/DevTips.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index d2f2534482f7..9380cdea6fe7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -74,7 +74,7 @@ additional questions or comments. [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ [coc_contact]: mailto:opencode@microsoft.com -[package]: TODO +[package]: placeholder [source]: TODO://link-to-path-in-the-SDK-repo [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python @@ -84,4 +84,4 @@ additional questions or comments. 
[iot-device-sdk]: https://pypi.org/project/azure-iot-device/ [iot-hub-sdk]: https://pypi.org/project/azure-iot-hub/ -[iot_device_connection_string]: https://docs.microsoft.com/en-us/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file +[iot_device_connection_string]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/docs/DevTips.md b/sdk/media/azure-media-lva-edge/docs/DevTips.md index f6dbf9e1277c..b649d500d873 100644 --- a/sdk/media/azure-media-lva-edge/docs/DevTips.md +++ b/sdk/media/azure-media-lva-edge/docs/DevTips.md @@ -36,5 +36,5 @@ A quick description of the five commands above: - Tox will complain if there are no tests. Add a dummy test in case you need to bypass this - Make sure there is an `__init__.py` file inside of every directory inside of `azure` (Example: `azure/media` should have an __init__.py file) -- Follow the ReadMe guidelines outlined here: https://review.docs.microsoft.com/en-us/help/contribute-ref/contribute-ref-how-to-document-sdk?branch=master#readme. ReadMe titles are case SENSITIVE and use sentence casing. +- Follow the ReadMe guidelines outlined here: https://review.docs.microsoft.com/help/contribute-ref/contribute-ref-how-to-document-sdk?branch=master#readme. ReadMe titles are case SENSITIVE and use sentence casing. - Make sure MANIFEST.in includes all required folders. 
(Most likely the required folders will be tests, samples, and the generated folder) From b90a28afbe6251b40992b9a104aadc2a0f9ade81 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 09:15:05 -0800 Subject: [PATCH 13/64] fixed broken link --- sdk/media/azure-media-lva-edge/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 9380cdea6fe7..bf4d7cba4c86 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -74,7 +74,7 @@ additional questions or comments. [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ [coc_contact]: mailto:opencode@microsoft.com -[package]: placeholder +[package]: TODO://link-to-published-package [source]: TODO://link-to-path-in-the-SDK-repo [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python From 67d9f15bc36f01a2bf04cf104929de4427bc044d Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 11:56:41 -0800 Subject: [PATCH 14/64] updated readme with examples --- sdk/media/azure-media-lva-edge/README.md | 48 +++++++++++++++++-- .../samples/sample_lva.py | 4 +- 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index bf4d7cba4c86..814c44507c97 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -4,8 +4,8 @@ Live Video Analytics on IoT Edge provides a platform to build intelligent video Use the client library for Live Video Analytics on IoT Edge to: -- simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) -- programatically construct media graph topologies and instances +- Simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) +- Programatically construct media graph topologies and instances [Package 
(PyPi)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] @@ -23,7 +23,8 @@ pip install azure-lva-edge * Python 2.7, or 3.5 or later is required to use this package. * You need an [Azure subscription][azure_sub], and a [IOT device connection string][iot_device_connection_string] to use this package. - +### Creating a graph topology and making requests +Please visit the [Examples](#examples) for starter code ## Key concepts ### Graph Topology vs Graph Instance @@ -35,7 +36,46 @@ The `CloudToDeviceMethod` is part of the azure-iot-hub sdk. This method allows y ## Examples -[Samples][samples] +### Creating a graph topology +To create a graph topology you need to define parameters, sources, and sinks. +``` +#Parameters +user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") +password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") +url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") + +#Source and Sink +source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) +node = MediaGraphNodeInput(node_name="rtspSource") +sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/") + +graph_properties = MediaGraphTopologyProperties(parameters=[user_name_param, password_param, url_param], sources=[source], sinks=[sink], description="Continuous video recording to an Azure Media Services Asset") + +graph_topology = 
MediaGraphTopology(name=graph_topology_name,properties=graph_properties) + +``` + +### Creating a graph instance +To create a graph instance, you need to have an existing graph topology. +``` +url_param = MediaGraphParameterDefinition(name="rtspUrl", value=graph_url) +graph_instance_properties = MediaGraphInstanceProperties(description="Sample graph description", topology_name=graph_topology_name, parameters=[url_param]) + +graph_instance = MediaGraphInstance(name=graph_instance_name, properties=graph_instance_properties) + +``` + +### Invoking a graph method request +To invoke a graph method on your device you need to first define the request using the lva sdk. Then send that method request using the iot sdk's `CloudToDeviceMethod` +``` +set_method_request = MediaGraphTopologySetRequest(graph=graph_topology) +direct_method = CloudToDeviceMethod(method_name=set_method_request.method_name, payload=set_method_request.serialize()) +registry_manager = IoTHubRegistryManager(connection_string) + +registry_manager.invoke_device_module_method(device_id, module_d, direct_method) +``` + +For more samples please visit [Samples][samples]. 
## Troubleshooting diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-lva-edge/samples/sample_lva.py index 9b5e91818af6..46a5d64d3c39 100644 --- a/sdk/media/azure-media-lva-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-lva-edge/samples/sample_lva.py @@ -11,7 +11,7 @@ connection_string = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING") graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" - +graph_url = '"rtsp://sample-url-from-camera"' def build_graph_topology(): graph_properties = MediaGraphTopologyProperties() @@ -31,7 +31,7 @@ def build_graph_topology(): return graph def build_graph_instance(): - url_param = MediaGraphParameterDefinition(name="rtspUrl", value="rtsp://rtspsim:554/media/camera-300s.mkv") + url_param = MediaGraphParameterDefinition(name="rtspUrl", value=graph_url) graph_instance_properties = MediaGraphInstanceProperties(description="Sample graph description", topology_name=graph_topology_name, parameters=[url_param]) graph_instance = MediaGraphInstance(name=graph_instance_name, properties=graph_instance_properties) From 584198d94c59fa19e207804c4a59bf30b37e86a6 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 12:55:00 -0800 Subject: [PATCH 15/64] updating readme based on comments --- sdk/media/azure-media-lva-edge/README.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 814c44507c97..278fb71a1cb7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -21,18 +21,22 @@ pip install azure-lva-edge ### Prerequisites * Python 2.7, or 3.5 or later is required to use this package. -* You need an [Azure subscription][azure_sub], and a [IOT device connection string][iot_device_connection_string] to use this package. 
+* You need an active [Azure subscription][azure_sub], and an [IoT device connection string][iot_device_connection_string] to use this package. ### Creating a graph topology and making requests Please visit the [Examples](#examples) for starter code ## Key concepts -### Graph Topology vs Graph Instance -A graph topology is essentially the blueprint or template of a graph. It defines the parameters of the graph using placeholders as values for them. A graph instance references a graph topology and specifies the parameters. This way you are able to have multiple graph instances referencing the same topology but with different values for parameters. For more information please visit [Media graph topologies and instances][doc_media_graph] +### MediaGraph Topology vs MediaGraph Instance +A _graph topology_ is a blueprint or template of a graph. It defines the parameters of the graph using placeholders as values for them. A _graph instance_ references a graph topology and specifies the parameters. This way you are able to have multiple graph instances referencing the same topology but with different values for parameters. For more information please visit [Media graph topologies and instances][doc_media_graph] ### CloudToDeviceMethod -The `CloudToDeviceMethod` is part of the azure-iot-hub sdk. This method allows you to communicate one way notifications to a device in your iot hub. In our case, we want to communicate various graph methods such as `MediaGraphTopologySetRequest` and `MediaGraphTopologyGetRequest`. To use `CloudToDeviceMethod` you need to pass in two parameters: `method_name` and `payload`. + +The first parameter, `method_name`, is the name of the media graph request you are sending. Make sure to use each method's predefined `method_name` property. For example, `MediaGraphTopologySetRequest.method_name`. + +The second parameter, `payload`, sends the entire serialization of the media graph request. For example, `MediaGraphTopologySetRequest.serialize()`
For example, `MediaGraphTopologySetRequest.serialize()` +The `CloudToDeviceMethod` is part of the [azure-iot-hub SDK][iot-hub-sdk]. This method allows you to communicate one way notifications to a device in your IoT hub. In our case, we want to communicate various graph methods such as `MediaGraphTopologySetRequest` and `MediaGraphTopologyGetRequest`. To use `CloudToDeviceMethod` you need to pass in two parameters: `method_name` and `payload`. + +The first parameter, `method_name`, is the name of the media graph request you are sending. Make sure to use each method's predefined `method_name` property. For example, `MediaGraphTopologySetRequest.method_name`. + +The second parameter, `payload`, sends the entire serialization of the media graph request. For example, `MediaGraphTopologySetRequest.serialize()` ## Examples @@ -42,7 +46,7 @@ To create a graph topology you need to define parameters, sources, and sinks. ``` #Parameters user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") -url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") +url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://rtspsim:554/media/camera-300s.mkv") #Source and Sink source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) @@ -75,7 +79,7 @@ registry_manager = IoTHubRegistryManager(connection_string) registry_manager.invoke_device_module_method(device_id, module_d, direct_method) ``` -For more samples please visit [Samples][samples]. +To try different media graph topologies with the SDK, please see the official [Samples][samples].
## Troubleshooting @@ -95,6 +99,8 @@ you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. +If you encounter any issues, please open an issue on our [Github][github-page-issues]. + When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only @@ -115,7 +121,7 @@ additional questions or comments. [coc_contact]: mailto:opencode@microsoft.com [package]: TODO://link-to-published-package -[source]: TODO://link-to-path-in-the-SDK-repo +[source]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-media-lva-edge [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python [doc_direct_methods]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/direct-methods @@ -124,4 +130,6 @@ additional questions or comments. 
[iot-device-sdk]: https://pypi.org/project/azure-iot-device/ [iot-hub-sdk]: https://pypi.org/project/azure-iot-hub/ -[iot_device_connection_string]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file +[iot_device_connection_string]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart + +[github-page-issues]: https://github.com/Azure/azure-sdk-for-python/issues \ No newline at end of file From f2b12d579dcafe04c90bb5925a4362f781f9dcc7 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 08:50:08 -0800 Subject: [PATCH 16/64] changing package name --- .../CHANGELOG.md | 0 .../MANIFEST.in | 0 .../README.md | 2 +- .../azure/__init__.py | 0 .../azure/media/__init__.py | 0 .../media/livevideoanalytics}/__init__.py | 0 .../livevideoanalytics}/edge/__init__.py | 0 .../edge/_generated/__init__.py | 0 .../edge/_generated/_version.py | 0 .../edge/_generated/models/__init__.py | 0 ...r_live_video_analyticson_io_tedge_enums.py | 0 .../edge/_generated/models/_models.py | 0 .../edge/_generated/models/_models_py3.py | 0 .../edge/_generated/py.typed | 0 .../livevideoanalytics}/edge/_version.py | 0 .../media/lva/edge/_generated/__init__.py | 1 + .../media/lva/edge/_generated/_version.py | 9 + .../lva/edge/_generated/models/__init__.py | 199 ++ ...r_live_video_analyticson_io_tedge_enums.py | 108 + .../lva/edge/_generated/models/_models.py | 2008 +++++++++++++++ .../lva/edge/_generated/models/_models_py3.py | 2185 +++++++++++++++++ .../azure/media/lva/edge/_generated/py.typed | 1 + .../dev_requirements.txt | 0 .../docs/DevTips.md | 10 +- .../samples/sample_conditional_async.py | 0 .../samples/sample_hello_world.py | 0 .../samples/sample_lva.py | 0 .../sdk_packaging.toml | 0 .../setup.cfg | 0 .../setup.py | 2 +- .../swagger/autorest.md | 2 +- .../swagger/commandOutput.txt | 0 .../tests/conftest.py | 0 
.../tests/test_app_config.py | 0 34 files changed, 4519 insertions(+), 8 deletions(-) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/CHANGELOG.md (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/MANIFEST.in (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/README.md (99%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/azure/__init__.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/azure/media/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/_version.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/_models.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/_models_py3.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => 
azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/py.typed (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_version.py (100%) create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/dev_requirements.txt (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/docs/DevTips.md (80%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/samples/sample_conditional_async.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/samples/sample_hello_world.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/samples/sample_lva.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/sdk_packaging.toml (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/setup.cfg (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/setup.py (98%) rename sdk/media/{azure-media-lva-edge => 
azure-media-livevideoanalytics-edge}/swagger/autorest.md (78%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/swagger/commandOutput.txt (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/tests/conftest.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/tests/test_app_config.py (100%) diff --git a/sdk/media/azure-media-lva-edge/CHANGELOG.md b/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md similarity index 100% rename from sdk/media/azure-media-lva-edge/CHANGELOG.md rename to sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in similarity index 100% rename from sdk/media/azure-media-lva-edge/MANIFEST.in rename to sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md similarity index 99% rename from sdk/media/azure-media-lva-edge/README.md rename to sdk/media/azure-media-livevideoanalytics-edge/README.md index 278fb71a1cb7..d2467be44b9e 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -16,7 +16,7 @@ Use the client library for Live Video Analytics on IoT Edge to: Install the Live Video Analytics client library for Python with pip: ```bash -pip install azure-lva-edge +pip install azure-media-livevideoanalytics--edge ``` ### Prerequisites diff --git a/sdk/media/azure-media-lva-edge/azure/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/__init__.py 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py new file mode 100644 index 000000000000..5960c353a898 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py new file mode 100644 index 000000000000..31ed98425268 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

# Prefer the Python-3-only model definitions; if that module cannot even be
# parsed (older interpreter) fall back to the portable `_models` module,
# which exposes the exact same class names.
try:
    from ._models_py3 import (
        ItemNonSetRequestBase,
        MediaGraphAssetSink,
        MediaGraphCertificateSource,
        MediaGraphCognitiveServicesVisionExtension,
        MediaGraphCredentials,
        MediaGraphEndpoint,
        MediaGraphExtensionProcessorBase,
        MediaGraphFileSink,
        MediaGraphFrameRateFilterProcessor,
        MediaGraphGrpcExtension,
        MediaGraphGrpcExtensionDataTransfer,
        MediaGraphHttpExtension,
        MediaGraphHttpHeaderCredentials,
        MediaGraphImage,
        MediaGraphImageFormat,
        MediaGraphImageFormatEncoded,
        MediaGraphImageFormatRaw,
        MediaGraphImageScale,
        MediaGraphInstance,
        MediaGraphInstanceActivateRequest,
        MediaGraphInstanceCollection,
        MediaGraphInstanceDeActivateRequest,
        MediaGraphInstanceDeleteRequest,
        MediaGraphInstanceGetRequest,
        MediaGraphInstanceListRequest,
        MediaGraphInstanceProperties,
        MediaGraphInstanceSetRequest,
        MediaGraphInstanceSetRequestBody,
        MediaGraphIoTHubMessageSink,
        MediaGraphIoTHubMessageSource,
        MediaGraphMotionDetectionProcessor,
        MediaGraphNodeInput,
        MediaGraphOutputSelector,
        MediaGraphParameterDeclaration,
        MediaGraphParameterDefinition,
        MediaGraphPemCertificateList,
        MediaGraphProcessor,
        MediaGraphRtspSource,
        MediaGraphSignalGateProcessor,
        MediaGraphSink,
        MediaGraphSource,
        MediaGraphSystemData,
        MediaGraphTlsEndpoint,
        MediaGraphTlsValidationOptions,
        MediaGraphTopology,
        MediaGraphTopologyCollection,
        MediaGraphTopologyDeleteRequest,
        MediaGraphTopologyGetRequest,
        MediaGraphTopologyListRequest,
        MediaGraphTopologyProperties,
        MediaGraphTopologySetRequest,
        MediaGraphTopologySetRequestBody,
        MediaGraphUnsecuredEndpoint,
        MediaGraphUsernamePasswordCredentials,
        OperationBase,
    )
except (SyntaxError, ImportError):
    from ._models import (  # type: ignore
        ItemNonSetRequestBase,
        MediaGraphAssetSink,
        MediaGraphCertificateSource,
        MediaGraphCognitiveServicesVisionExtension,
        MediaGraphCredentials,
        MediaGraphEndpoint,
        MediaGraphExtensionProcessorBase,
        MediaGraphFileSink,
        MediaGraphFrameRateFilterProcessor,
        MediaGraphGrpcExtension,
        MediaGraphGrpcExtensionDataTransfer,
        MediaGraphHttpExtension,
        MediaGraphHttpHeaderCredentials,
        MediaGraphImage,
        MediaGraphImageFormat,
        MediaGraphImageFormatEncoded,
        MediaGraphImageFormatRaw,
        MediaGraphImageScale,
        MediaGraphInstance,
        MediaGraphInstanceActivateRequest,
        MediaGraphInstanceCollection,
        MediaGraphInstanceDeActivateRequest,
        MediaGraphInstanceDeleteRequest,
        MediaGraphInstanceGetRequest,
        MediaGraphInstanceListRequest,
        MediaGraphInstanceProperties,
        MediaGraphInstanceSetRequest,
        MediaGraphInstanceSetRequestBody,
        MediaGraphIoTHubMessageSink,
        MediaGraphIoTHubMessageSource,
        MediaGraphMotionDetectionProcessor,
        MediaGraphNodeInput,
        MediaGraphOutputSelector,
        MediaGraphParameterDeclaration,
        MediaGraphParameterDefinition,
        MediaGraphPemCertificateList,
        MediaGraphProcessor,
        MediaGraphRtspSource,
        MediaGraphSignalGateProcessor,
        MediaGraphSink,
        MediaGraphSource,
        MediaGraphSystemData,
        MediaGraphTlsEndpoint,
        MediaGraphTlsValidationOptions,
        MediaGraphTopology,
        MediaGraphTopologyCollection,
        MediaGraphTopologyDeleteRequest,
        MediaGraphTopologyGetRequest,
        MediaGraphTopologyListRequest,
        MediaGraphTopologyProperties,
        MediaGraphTopologySetRequest,
        MediaGraphTopologySetRequestBody,
        MediaGraphUnsecuredEndpoint,
        MediaGraphUsernamePasswordCredentials,
        OperationBase,
    )

from ._definitionsfor_live_video_analyticson_io_tedge_enums import (
    MediaGraphGrpcExtensionDataTransferMode,
    MediaGraphImageEncodingFormat,
    MediaGraphImageFormatRawPixelFormat,
    MediaGraphImageScaleMode,
    MediaGraphInstanceState,
    MediaGraphMotionDetectionSensitivity,
    MediaGraphOutputSelectorOperator,
    MediaGraphParameterType,
    MediaGraphRtspTransport,
)

# Public API of the models sub-package: every model class followed by every
# enum, in the same order the names are imported above.
__all__ = [
    "ItemNonSetRequestBase",
    "MediaGraphAssetSink",
    "MediaGraphCertificateSource",
    "MediaGraphCognitiveServicesVisionExtension",
    "MediaGraphCredentials",
    "MediaGraphEndpoint",
    "MediaGraphExtensionProcessorBase",
    "MediaGraphFileSink",
    "MediaGraphFrameRateFilterProcessor",
    "MediaGraphGrpcExtension",
    "MediaGraphGrpcExtensionDataTransfer",
    "MediaGraphHttpExtension",
    "MediaGraphHttpHeaderCredentials",
    "MediaGraphImage",
    "MediaGraphImageFormat",
    "MediaGraphImageFormatEncoded",
    "MediaGraphImageFormatRaw",
    "MediaGraphImageScale",
    "MediaGraphInstance",
    "MediaGraphInstanceActivateRequest",
    "MediaGraphInstanceCollection",
    "MediaGraphInstanceDeActivateRequest",
    "MediaGraphInstanceDeleteRequest",
    "MediaGraphInstanceGetRequest",
    "MediaGraphInstanceListRequest",
    "MediaGraphInstanceProperties",
    "MediaGraphInstanceSetRequest",
    "MediaGraphInstanceSetRequestBody",
    "MediaGraphIoTHubMessageSink",
    "MediaGraphIoTHubMessageSource",
    "MediaGraphMotionDetectionProcessor",
    "MediaGraphNodeInput",
    "MediaGraphOutputSelector",
    "MediaGraphParameterDeclaration",
    "MediaGraphParameterDefinition",
    "MediaGraphPemCertificateList",
    "MediaGraphProcessor",
    "MediaGraphRtspSource",
    "MediaGraphSignalGateProcessor",
    "MediaGraphSink",
    "MediaGraphSource",
    "MediaGraphSystemData",
    "MediaGraphTlsEndpoint",
    "MediaGraphTlsValidationOptions",
    "MediaGraphTopology",
    "MediaGraphTopologyCollection",
    "MediaGraphTopologyDeleteRequest",
    "MediaGraphTopologyGetRequest",
    "MediaGraphTopologyListRequest",
    "MediaGraphTopologyProperties",
    "MediaGraphTopologySetRequest",
    "MediaGraphTopologySetRequestBody",
    "MediaGraphUnsecuredEndpoint",
    "MediaGraphUsernamePasswordCredentials",
    "OperationBase",
    "MediaGraphGrpcExtensionDataTransferMode",
    "MediaGraphImageEncodingFormat",
    "MediaGraphImageFormatRawPixelFormat",
    "MediaGraphImageScaleMode",
    "MediaGraphInstanceState",
    "MediaGraphMotionDetectionSensitivity",
    "MediaGraphOutputSelectorOperator",
    "MediaGraphParameterType",
    "MediaGraphRtspTransport",
]
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from enum import Enum, EnumMeta
from six import with_metaclass


class _CaseInsensitiveEnumMeta(EnumMeta):
    """Metaclass making enum member lookup case-insensitive.

    Both item access (``E['name']``) and attribute access (``E.name``) are
    normalized to the upper-cased member name before lookup.
    """

    def __getitem__(self, name):
        # Canonical member names are upper case; normalize before lookup.
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`.

        __getattr__ is used instead of descriptors or inserting into the enum
        class' __dict__ so that `name` and `value` can serve both as member
        properties (living in the class' __dict__) and as members themselves.
        """
        try:
            return cls._member_map_[name.upper()]
        except KeyError:
            raise AttributeError(name)


class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """How frame data is handed to the inferencing engine."""

    EMBEDDED = "Embedded"  #: Frame data travels embedded inside the gRPC messages.
    SHARED_MEMORY = "SharedMemory"  #: Frame data travels through a shared-memory region.


class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Encoding formats available for the image."""

    JPEG = "Jpeg"  #: JPEG encoding.
    BMP = "Bmp"  #: BMP encoding.
    PNG = "Png"  #: PNG encoding.


class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Pixel format of a raw (unencoded) image."""

    YUV420_P = "Yuv420p"  #: Planar YUV 4:2:0, 12bpp (one Cr and Cb sample per 2x2 Y samples).
    RGB565_BE = "Rgb565be"  #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B (lsb), big-endian.
    RGB565_LE = "Rgb565le"  #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B (lsb), little-endian.
    RGB555_BE = "Rgb555be"  #: Packed RGB 5:5:5, 16bpp, (msb) 1X 5R 5G 5B (lsb), big-endian, X unused.
    RGB555_LE = "Rgb555le"  #: Packed RGB 5:5:5, 16bpp, (msb) 1X 5R 5G 5B (lsb), little-endian, X unused.
    RGB24 = "Rgb24"  #: Packed RGB 8:8:8, 24bpp, RGBRGB byte order.
    BGR24 = "Bgr24"  #: Packed RGB 8:8:8, 24bpp, BGRBGR byte order.
    ARGB = "Argb"  #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB byte order.
    RGBA = "Rgba"  #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA byte order.
    ABGR = "Abgr"  #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR byte order.
    BGRA = "Bgra"  #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA byte order.


class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """How an input video frame is scaled into an image before being sent to
    an inference engine."""

    PRESERVE_ASPECT_RATIO = "PreserveAspectRatio"  #: Keep the input frame's aspect ratio.
    PAD = "Pad"  #: Center-pad the input frame to the requested dimensions.
    STRETCH = "Stretch"  #: Stretch the input frame to the requested dimensions.


class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """States a graph instance may be in."""

    INACTIVE = "Inactive"  #: The instance is inactive.
    ACTIVATING = "Activating"  #: The instance is being activated.
    ACTIVE = "Active"  #: The instance is active.
    DEACTIVATING = "Deactivating"  #: The instance is being deactivated.


class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sensitivity level of the motion detection processor."""

    LOW = "Low"  #: Low sensitivity.
    MEDIUM = "Medium"  #: Medium sensitivity.
    HIGH = "High"  #: High sensitivity.


class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Operator used to compare streams."""

    IS_ENUM = "is"  #: The media type is the same type or a subtype.
    IS_NOT = "isNot"  #: The media type is not the same type nor a subtype.


class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Type of a declared graph parameter."""

    STRING = "String"  #: A string value.
    SECRET_STRING = "SecretString"  #: A string holding sensitive information.
    INT = "Int"  #: A 32-bit signed integer.
    DOUBLE = "Double"  #: A 64-bit double-precision floating point value.
    BOOL = "Bool"  #: A boolean, true or false.


class MediaGraphRtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Underlying RTSP transport; enables or disables HTTP tunneling."""

    HTTP = "Http"  #: HTTP/HTTPS transport; use when HTTP tunneling is desired.
    TCP = "Tcp"  #: TCP transport; use when HTTP tunneling is NOT desired.
diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py new file mode 100644 index 000000000000..62f58c7ea385 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py @@ -0,0 +1,2008 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import msrest.serialization + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = kwargs['name'] + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. + :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. + :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = kwargs.get('asset_name_pattern', None) + self.segment_length = kwargs.get('segment_length', None) + self.local_media_cache_path = kwargs.get('local_media_cache_path', None) + self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = kwargs.get('endpoint', None) + self.image = kwargs.get('image', None) + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = kwargs.get('credentials', None) + self.url = kwargs['url'] + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. 
+ :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = kwargs['file_path_pattern'] + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. 
+ :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = kwargs.get('maximum_fps', None) + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. 
+ :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'data_transfer': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphGrpcExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str + self.data_transfer = kwargs['data_transfer'] + + + class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): + """Describes how media should be transferred to the inferencing engine. + + All required parameters must be populated in order to send to Azure. + + :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if + mode is SharedMemory. Should not be specified otherwise. + :type shared_memory_size_mi_b: str + :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + values include: "Embedded", "SharedMemory".
+ :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) + self.mode = kwargs['mode'] + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. + :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = kwargs['header_name'] + self.header_value = kwargs['header_value'] + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. 
+ :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = kwargs.get('scale', None) + self.format = kwargs.get('format', None) + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). 
+ :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = kwargs.get('encoding', "Jpeg") + self.quality = kwargs.get('quality', None) + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = kwargs.get('pixel_format', None) + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. + + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. 
+ :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = kwargs.get('mode', None) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDeactivate' # type: str + + +class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeleteRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDelete' # type: str + + +class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): + """MediaGraphInstanceGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. 
method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. Possible values include: "Inactive", + "Activating", "Active", "Deactivating". 
+ :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + self.topology_name = kwargs.get('topology_name', None) + self.parameters = kwargs.get('parameters', None) + self.state = kwargs.get('state', None) + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. + :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'instance': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceSet' # type: str + self.instance = kwargs['instance'] + + +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): + """MediaGraphInstanceSetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. 
+ + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceSetRequestBody, self).__init__(**kwargs) + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphIoTHubMessageSink(MediaGraphSink): + """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. 
An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param hub_output_name: Name of the output path to which the graph will publish message. These + messages can then be delivered to desired destinations by declaring routes referencing the + output path in the IoT Edge deployment manifest. + :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = kwargs.get('hub_output_name', None) + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. 
+ :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = kwargs.get('hub_input_name', None) + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". + :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. + :type output_motion_region: bool + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, + 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphMotionDetectionProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str + self.sensitivity = kwargs.get('sensitivity', None) + self.output_motion_region = kwargs.get('output_motion_region', None) + + +class MediaGraphNodeInput(msrest.serialization.Model): + """Represents the input to any node in a media graph. + + :param node_name: The name of another node in the media graph, the output of which is used as + input to this node. 
+ :type node_name: str + :param output_selectors: Allows for the selection of particular streams from another node. + :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + """ + + _attribute_map = { + 'node_name': {'key': 'nodeName', 'type': 'str'}, + 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphNodeInput, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.output_selectors = kwargs.get('output_selectors', None) + + +class MediaGraphOutputSelector(msrest.serialization.Model): + """Allows for the selection of particular streams from another node. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar property: The stream property to compare with. Default value: "mediaType". + :vartype property: str + :param operator: The operator to compare streams by. Possible values include: "is", "isNot". + :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :param value: Value to compare against. + :type value: str + """ + + _validation = { + 'property': {'constant': True}, + } + + _attribute_map = { + 'property': {'key': 'property', 'type': 'str'}, + 'operator': {'key': 'operator', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + property = "mediaType" + + def __init__( + self, + **kwargs + ): + super(MediaGraphOutputSelector, self).__init__(**kwargs) + self.operator = kwargs.get('operator', None) + self.value = kwargs.get('value', None) + + +class MediaGraphParameterDeclaration(msrest.serialization.Model): + """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the parameter. + :type name: str + :param type: Required. name. Possible values include: "String", "SecretString", "Int", + "Double", "Bool". + :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :param description: Description of the parameter. + :type description: str + :param default: The default value for the parameter, to be used if the graph instance does not + specify a value. + :type default: str + """ + + _validation = { + 'name': {'required': True, 'max_length': 64, 'min_length': 0}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'default': {'key': 'default', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphParameterDeclaration, self).__init__(**kwargs) + self.name = kwargs['name'] + self.type = kwargs['type'] + self.description = kwargs.get('description', None) + self.default = kwargs.get('default', None) + + +class MediaGraphParameterDefinition(msrest.serialization.Model): + """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Name of parameter as defined in the graph topology. + :type name: str + :param value: Required. Value of parameter. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = kwargs['name'] + self.value = kwargs['value'] + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. + :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = kwargs['certificates'] + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = kwargs.get('transport', None) + self.endpoint = kwargs['endpoint'] + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. 
+ :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. + :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) + self.activation_signal_offset = kwargs.get('activation_signal_offset', None) + self.minimum_activation_time = kwargs.get('minimum_activation_time', None) + self.maximum_activation_time = kwargs.get('maximum_activation_time', None) + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). 
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = kwargs.get('created_at', None) + self.last_modified_at = kwargs.get('last_modified_at', None) + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = kwargs.get('trusted_certificates', None) + self.validation_options = kwargs.get('validation_options', None) + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. + :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = kwargs.get('ignore_hostname', None) + self.ignore_signature = kwargs.get('ignore_signature', None) + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. 
+ :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
    :vartype api_version: str
    :param name: Required. Name of the graph topology to delete.
    :type name: str
    """

    _validation = {
        'method_name': {'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        **kwargs
    ):
        super(MediaGraphTopologyDeleteRequest, self).__init__(**kwargs)
        self.method_name = 'GraphTopologyDelete'  # type: str


class MediaGraphTopologyGetRequest(ItemNonSetRequestBase):
    """Direct-method request payload for retrieving a single graph topology by name.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar method_name: method name.Constant filled by server.
    :vartype method_name: str
    :ivar api_version: api version. Default value: "1.0".
    :vartype api_version: str
    :param name: Required. Name of the graph topology to retrieve.
    :type name: str
    """

    _validation = {
        'method_name': {'readonly': True},
        'api_version': {'constant': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        **kwargs
    ):
        super(MediaGraphTopologyGetRequest, self).__init__(**kwargs)
        self.method_name = 'GraphTopologyGet'  # type: str


class MediaGraphTopologyListRequest(OperationBase):
    """Direct-method request payload for enumerating all graph topologies.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar method_name: method name.Constant filled by server.
    :vartype method_name: str
    :ivar api_version: api version. Default value: "1.0".
    :vartype api_version: str
    """

    _validation = {
        'method_name': {'readonly': True},
        'api_version': {'constant': True},
    }

    _attribute_map = {
        'method_name': {'key': 'methodName', 'type': 'str'},
        'api_version': {'key': '@apiVersion', 'type': 'str'},
    }

    api_version = "1.0"

    def __init__(
        self,
        **kwargs
    ):
        super(MediaGraphTopologyListRequest, self).__init__(**kwargs)
        self.method_name = 'GraphTopologyList'  # type: str


class MediaGraphTopologyProperties(msrest.serialization.Model):
    """Describes the properties of a graph topology.

    :param description: An optional description for the graph topology.
    :type description: str
    :param parameters: Parameter declarations exposed by this topology; graph instances supply
     values for them.
    :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration]
    :param sources: Source nodes of the graph.
    :type sources: list[~azure.media.lva.edge.models.MediaGraphSource]
    :param processors: Processor nodes of the graph.
    :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor]
    :param sinks: Sink nodes of the graph.
    :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink]
    """

    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'},
        'sources': {'key': 'sources', 'type': '[MediaGraphSource]'},
        'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'},
        'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MediaGraphTopologyProperties, self).__init__(**kwargs)
        self.description = kwargs.get('description', None)
        self.parameters = kwargs.get('parameters', None)
        self.sources = kwargs.get('sources', None)
        self.processors = kwargs.get('processors', None)
        self.sinks = kwargs.get('sinks', None)


class MediaGraphTopologySetRequest(OperationBase):
    """Direct-method request payload for creating or updating a graph topology.
+ + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param graph: Required. Describes a graph topology. + :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'graph': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologySetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologySet' # type: str + self.graph = kwargs['graph'] + + +class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): + """MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologySetRequestBody, self).__init__(**kwargs) + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): + """An endpoint that the media graph can connect to, with no encryption in transit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. 
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. + :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = kwargs['username'] + self.password = kwargs.get('password', None) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py new file mode 100644 index 000000000000..5de3adde8e11 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py @@ -0,0 +1,2185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) 
Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import List, Optional, Union + +import msrest.serialization + +from ._definitionsfor_live_video_analyticson_io_tedge_enums import * + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = name + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. + :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. + :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + asset_name_pattern: Optional[str] = None, + segment_length: Optional[datetime.timedelta] = None, + local_media_cache_path: Optional[str] = None, + local_media_cache_maximum_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = asset_name_pattern + self.segment_length = segment_length + self.local_media_cache_path = local_media_cache_path + self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = endpoint + self.image = image + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. 
+ :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. 
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = credentials + self.url = url + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. 
+ :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + file_path_pattern: str, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = file_path_pattern + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. 
+ :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + maximum_fps: Optional[str] = None, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = maximum_fps + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. 
+    :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer
+    """
+
+    _validation = {
+        'type': {'required': True},
+        'name': {'required': True},
+        'inputs': {'required': True},
+        'data_transfer': {'required': True},
+    }
+
+    _attribute_map = {
+        'type': {'key': '@type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'},
+        'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'},
+        'image': {'key': 'image', 'type': 'MediaGraphImage'},
+        'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'},
+    }
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        inputs: List["MediaGraphNodeInput"],
+        data_transfer: "MediaGraphGrpcExtensionDataTransfer",
+        endpoint: Optional["MediaGraphEndpoint"] = None,
+        image: Optional["MediaGraphImage"] = None,
+        **kwargs
+    ):
+        super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs)
+        self.type = '#Microsoft.Media.MediaGraphGrpcExtension'  # type: str
+        self.data_transfer = data_transfer
+
+
+class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model):
+    """Describes how media should be transferred to the inferencing engine.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if
+     mode is SharedMemory. Should not be specified otherwise.
+    :type shared_memory_size_mi_b: str
+    :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible
+     values include: "Embedded", "SharedMemory".
+ :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Union[str, "MediaGraphGrpcExtensionDataTransferMode"], + shared_memory_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = shared_memory_size_mi_b + self.mode = mode + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. 
+ :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + *, + header_name: str, + header_value: str, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = header_name + self.header_value = header_value + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. + :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + *, + scale: Optional["MediaGraphImageScale"] = None, + format: Optional["MediaGraphImageFormat"] = None, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = scale + self.format = format + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). + :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + *, + encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", + quality: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = encoding + self.quality = quality + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + *, + pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = pixel_format + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. + + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. + :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, + width: Optional[str] = None, + height: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = mode + self.width = width + self.height = height + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. 
+ :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphInstanceProperties"] = None, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphInstance"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+    :type name: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        **kwargs
+    ):
+        super(MediaGraphInstanceDeActivateRequest, self).__init__(name=name, **kwargs)
+        self.method_name = 'GraphInstanceDeactivate'  # type: str
+
+
+class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase):
+    """MediaGraphInstanceDeleteRequest.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name.Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+    :vartype api_version: str
+    :param name: Required. Name of the graph instance to delete.
+    :type name: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        **kwargs
+    ):
+        super(MediaGraphInstanceDeleteRequest, self).__init__(name=name, **kwargs)
+        self.method_name = 'GraphInstanceDelete'  # type: str
+
+
+class MediaGraphInstanceGetRequest(ItemNonSetRequestBase):
+    """MediaGraphInstanceGetRequest.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name.Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+ :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. 
Possible values include: "Inactive", + "Activating", "Active", "Deactivating". + :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + topology_name: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDefinition"]] = None, + state: Optional[Union[str, "MediaGraphInstanceState"]] = None, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = description + self.topology_name = topology_name + self.parameters = parameters + self.state = state + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. 
+ :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'instance': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + instance: "MediaGraphInstance", + **kwargs + ): + super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceSet' # type: str + self.instance = instance + + +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): + """MediaGraphInstanceSetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. 
+    :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'},
+        'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        system_data: Optional["MediaGraphSystemData"] = None,
+        properties: Optional["MediaGraphInstanceProperties"] = None,
+        **kwargs
+    ):
+        super(MediaGraphInstanceSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs)
+        # NOTE(review): autorest's multiple-inheritance template emitted this
+        # assignment twice; assign method_name exactly once.
+        self.method_name = 'MediaGraphInstanceSetRequestBody'  # type: str
+        self.name = name
+        self.system_data = system_data
+        self.properties = properties
+
+
+class MediaGraphIoTHubMessageSink(MediaGraphSink):
+    """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param type: Required. The discriminator for derived types.Constant filled by server.
+    :type type: str
+    :param name: Required. Name to be used for the media graph sink.
+    :type name: str
+    :param inputs: Required. An array of the names of the other nodes in the media graph, the
+     outputs of which are used as input for this sink node.
+    :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput]
+    :param hub_output_name: Name of the output path to which the graph will publish message. These
+     messages can then be delivered to desired destinations by declaring routes referencing the
+     output path in the IoT Edge deployment manifest.
+ :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + hub_output_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = hub_output_name + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + hub_input_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = hub_input_name + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". 
+ :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. + :type output_motion_region: bool + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, + 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, + output_motion_region: Optional[bool] = None, + **kwargs + ): + super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str + self.sensitivity = sensitivity + self.output_motion_region = output_motion_region + + +class MediaGraphNodeInput(msrest.serialization.Model): + """Represents the input to any node in a media graph. + + :param node_name: The name of another node in the media graph, the output of which is used as + input to this node. + :type node_name: str + :param output_selectors: Allows for the selection of particular streams from another node. 
+ :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + """ + + _attribute_map = { + 'node_name': {'key': 'nodeName', 'type': 'str'}, + 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, + } + + def __init__( + self, + *, + node_name: Optional[str] = None, + output_selectors: Optional[List["MediaGraphOutputSelector"]] = None, + **kwargs + ): + super(MediaGraphNodeInput, self).__init__(**kwargs) + self.node_name = node_name + self.output_selectors = output_selectors + + +class MediaGraphOutputSelector(msrest.serialization.Model): + """Allows for the selection of particular streams from another node. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar property: The stream property to compare with. Default value: "mediaType". + :vartype property: str + :param operator: The operator to compare streams by. Possible values include: "is", "isNot". + :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :param value: Value to compare against. + :type value: str + """ + + _validation = { + 'property': {'constant': True}, + } + + _attribute_map = { + 'property': {'key': 'property', 'type': 'str'}, + 'operator': {'key': 'operator', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + property = "mediaType" + + def __init__( + self, + *, + operator: Optional[Union[str, "MediaGraphOutputSelectorOperator"]] = None, + value: Optional[str] = None, + **kwargs + ): + super(MediaGraphOutputSelector, self).__init__(**kwargs) + self.operator = operator + self.value = value + + +class MediaGraphParameterDeclaration(msrest.serialization.Model): + """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the parameter. + :type name: str + :param type: Required. name. Possible values include: "String", "SecretString", "Int", + "Double", "Bool". + :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :param description: Description of the parameter. + :type description: str + :param default: The default value for the parameter, to be used if the graph instance does not + specify a value. + :type default: str + """ + + _validation = { + 'name': {'required': True, 'max_length': 64, 'min_length': 0}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'default': {'key': 'default', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "MediaGraphParameterType"], + description: Optional[str] = None, + default: Optional[str] = None, + **kwargs + ): + super(MediaGraphParameterDeclaration, self).__init__(**kwargs) + self.name = name + self.type = type + self.description = description + self.default = default + + +class MediaGraphParameterDefinition(msrest.serialization.Model): + """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Name of parameter as defined in the graph topology. + :type name: str + :param value: Required. Value of parameter. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + value: str, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = name + self.value = value + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. + :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + *, + certificates: List[str], + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = certificates + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + *, + name: str, + endpoint: "MediaGraphEndpoint", + transport: Optional[Union[str, "MediaGraphRtspTransport"]] = None, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = transport + self.endpoint = endpoint + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. 
+ :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. + :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + activation_evaluation_window: Optional[str] = None, + activation_signal_offset: Optional[str] = None, + minimum_activation_time: Optional[str] = None, + maximum_activation_time: Optional[str] = None, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = activation_evaluation_window + self.activation_signal_offset = activation_signal_offset + self.minimum_activation_time = minimum_activation_time + self.maximum_activation_time = maximum_activation_time + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). 
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + *, + created_at: Optional[datetime.datetime] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = created_at + self.last_modified_at = last_modified_at + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + trusted_certificates: Optional["MediaGraphCertificateSource"] = None, + validation_options: Optional["MediaGraphTlsValidationOptions"] = None, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = trusted_certificates + self.validation_options = validation_options + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. 
+ :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + *, + ignore_hostname: Optional[str] = None, + ignore_signature: Optional[str] = None, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = ignore_hostname + self.ignore_signature = ignore_signature + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). 
+ :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphTopology"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphTopologyDeleteRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphTopologyDelete' # type: str + + +class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): + """MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+    :type name: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        **kwargs
+    ):
+        super(MediaGraphTopologyGetRequest, self).__init__(name=name, **kwargs)
+        self.method_name = 'GraphTopologyGet'  # type: str
+
+
+class MediaGraphTopologyListRequest(OperationBase):
+    """MediaGraphTopologyListRequest.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar method_name: method name.Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+    :vartype api_version: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphTopologyListRequest, self).__init__(**kwargs)
+        self.method_name = 'GraphTopologyList'  # type: str
+
+
+class MediaGraphTopologyProperties(msrest.serialization.Model):
+    """Describes the properties of a graph topology.
+
+    :param description: An optional description of the graph topology.
+    :type description: str
+    :param parameters: The parameters declared by this graph topology; values for these can be
+     supplied at graph instance creation.
+    :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration]
+    :param sources: The source nodes of the graph topology.
+    :type sources: list[~azure.media.lva.edge.models.MediaGraphSource]
+    :param processors: The processor nodes of the graph topology.
+    :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor]
+    :param sinks: The sink nodes of the graph topology.
+ :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, + 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, + 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, + 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDeclaration"]] = None, + sources: Optional[List["MediaGraphSource"]] = None, + processors: Optional[List["MediaGraphProcessor"]] = None, + sinks: Optional[List["MediaGraphSink"]] = None, + **kwargs + ): + super(MediaGraphTopologyProperties, self).__init__(**kwargs) + self.description = description + self.parameters = parameters + self.sources = sources + self.processors = processors + self.sinks = sinks + + +class MediaGraphTopologySetRequest(OperationBase): + """MediaGraphTopologySetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param graph: Required. Describes a graph topology. 
+ :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'graph': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + graph: "MediaGraphTopology", + **kwargs + ): + super(MediaGraphTopologySetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologySet' # type: str + self.graph = graph + + +class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): + """MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): + """An endpoint that the media graph can connect to, with no encryption in transit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. 
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. 
+ :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + *, + username: str, + password: Optional[str] = None, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = username + self.password = password diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt similarity index 100% rename from sdk/media/azure-media-lva-edge/dev_requirements.txt rename to sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt diff --git a/sdk/media/azure-media-lva-edge/docs/DevTips.md b/sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md similarity index 80% rename from sdk/media/azure-media-lva-edge/docs/DevTips.md rename to sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md index b649d500d873..aee95a990e07 100644 --- a/sdk/media/azure-media-lva-edge/docs/DevTips.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md @@ -18,11 +18,11 @@ tox -c eng/tox/tox.ini ``` To run a specific tox command from your directory use the following commands: ```bash -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sphinx 
-azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e lint -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e mypy -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e whl -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sdist +> tox -c ../../../eng/tox/tox.ini -e sphinx +> tox -c ../../../eng/tox/tox.ini -e lint +> tox -c ../../../eng/tox/tox.ini -e mypy +> tox -c ../../../eng/tox/tox.ini -e whl +> tox -c ../../../eng/tox/tox.ini -e sdist ``` A quick description of the five commands above: * sphinx: documentation generation using the inline comments written in our code diff --git a/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py similarity index 100% rename from sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py rename to sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py diff --git a/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py similarity index 100% rename from sdk/media/azure-media-lva-edge/samples/sample_hello_world.py rename to sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py similarity index 100% rename from sdk/media/azure-media-lva-edge/samples/sample_lva.py rename to sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py diff --git a/sdk/media/azure-media-lva-edge/sdk_packaging.toml b/sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml similarity index 100% rename from sdk/media/azure-media-lva-edge/sdk_packaging.toml rename to 
sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml diff --git a/sdk/media/azure-media-lva-edge/setup.cfg b/sdk/media/azure-media-livevideoanalytics-edge/setup.cfg similarity index 100% rename from sdk/media/azure-media-lva-edge/setup.cfg rename to sdk/media/azure-media-livevideoanalytics-edge/setup.cfg diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-livevideoanalytics-edge/setup.py similarity index 98% rename from sdk/media/azure-media-lva-edge/setup.py rename to sdk/media/azure-media-livevideoanalytics-edge/setup.py index a4bfc61f9c6f..324e31db3312 100644 --- a/sdk/media/azure-media-lva-edge/setup.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/setup.py @@ -13,7 +13,7 @@ from setuptools import find_packages, setup # Change the PACKAGE_NAME only to change folder and different name -PACKAGE_NAME = "azure-media-lva-edge" +PACKAGE_NAME = "azure-media-livevideoanalytics-edge" PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" # a-b-c => a/b/c diff --git a/sdk/media/azure-media-lva-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md similarity index 78% rename from sdk/media/azure-media-lva-edge/swagger/autorest.md rename to sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index 48618fb331ed..d318650fa662 100644 --- a/sdk/media/azure-media-lva-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -10,7 +10,7 @@ autorest --v3 --python ## Settings ```yaml -require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated namespace: azure.media.lva.edge no-namespace-folders: true diff --git a/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt b/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt similarity index 100% rename 
from sdk/media/azure-media-lva-edge/swagger/commandOutput.txt rename to sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py b/sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py similarity index 100% rename from sdk/media/azure-media-lva-edge/tests/conftest.py rename to sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py similarity index 100% rename from sdk/media/azure-media-lva-edge/tests/test_app_config.py rename to sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py From aacf22aa278586865940e6f33dd6fe3d0ab82fcf Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 09:02:47 -0800 Subject: [PATCH 17/64] fixing broken link --- sdk/media/azure-media-livevideoanalytics-edge/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index d2467be44b9e..5e665397682c 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -121,7 +121,7 @@ additional questions or comments. 
[coc_contact]: mailto:opencode@microsoft.com [package]: TODO://link-to-published-package -[source]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-media-lva-edge +[source]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python [doc_direct_methods]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/direct-methods From 773fff7e3da9ba550b3ee68cfed3c0e32d2808e6 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 09:15:35 -0800 Subject: [PATCH 18/64] missed one namespace update --- .../edge/_generated/models/_models.py | 123 +- .../edge/_generated/models/_models_py3.py | 123 +- .../media/lva/edge/_generated/__init__.py | 1 - .../media/lva/edge/_generated/_version.py | 9 - .../lva/edge/_generated/models/__init__.py | 199 -- ...r_live_video_analyticson_io_tedge_enums.py | 108 - .../lva/edge/_generated/models/_models.py | 2008 --------------- .../lva/edge/_generated/models/_models_py3.py | 2185 ----------------- .../azure/media/lva/edge/_generated/py.typed | 1 - .../swagger/autorest.md | 4 +- 10 files changed, 136 insertions(+), 4625 deletions(-) delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py delete mode 100644 
sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index 62f58c7ea385..b0cb8248aec0 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -106,7 +106,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -146,7 +146,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param asset_name_pattern: A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of @@ -236,7 +236,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -279,11 +279,11 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -325,11 +325,11 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -397,7 +397,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -438,7 +438,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param file_path_pattern: Required. Absolute file path pattern for creating new files on the Edge device. :type file_path_pattern: str @@ -478,7 +478,7 @@ class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not exceed this limit. :type maximum_fps: str @@ -517,13 +517,14 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage :param data_transfer: Required. How media should be transferred to the inferencing engine. - :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer """ _validation = { @@ -561,7 +562,8 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -593,11 +595,11 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -661,9 +663,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat """ _attribute_map = { @@ -721,7 +723,8 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): :type type: str :param encoding: The different encoding formats that can be used for the image. Possible values include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :type encoding: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best quality). :type quality: str @@ -756,7 +759,8 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :type type: str :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". 
- :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + :type pixel_format: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -782,7 +786,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -813,9 +817,9 @@ class MediaGraphInstance(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -879,7 +883,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """Collection of graph instances. :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response). 
@@ -1051,10 +1055,11 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph Instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1087,7 +1092,7 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param instance: Required. Represents a Media Graph instance. - :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ _validation = { @@ -1127,9 +1132,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -1171,7 +1176,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param hub_output_name: Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1284,10 +1289,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :type sensitivity: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool @@ -1324,7 +1330,8 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + :type output_selectors: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1349,7 +1356,8 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". 
:vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1384,7 +1392,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. name. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str :param default: The default value for the parameter, to be used if the graph instance does not @@ -1487,9 +1495,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport :param endpoint: Required. RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint """ _validation = { @@ -1526,7 +1534,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events, before evaluating them. :type activation_evaluation_window: str @@ -1601,15 +1609,17 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :type trusted_certificates: + ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. - :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + :type validation_options: + ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1667,9 +1677,9 @@ class MediaGraphTopology(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -1696,7 +1706,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """Collection of graph topologies. :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response). @@ -1828,13 +1838,14 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :param description: An optional description for the instance. :type description: str :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] + :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ _attribute_map = { @@ -1869,7 +1880,7 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param graph: Required. Describes a graph topology. 
- :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ _validation = { @@ -1909,9 +1920,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -1950,7 +1961,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index 5de3adde8e11..a71214b4003f 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -113,7 +113,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -156,7 +156,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param asset_name_pattern: A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of @@ -253,7 +253,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -299,11 +299,11 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -350,11 +350,11 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -427,7 +427,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -471,7 +471,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param file_path_pattern: Required. 
Absolute file path pattern for creating new files on the Edge device. :type file_path_pattern: str @@ -515,7 +515,7 @@ class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not exceed this limit. :type maximum_fps: str @@ -558,13 +558,14 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage :param data_transfer: Required. How media should be transferred to the inferencing engine. - :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer """ _validation = { @@ -608,7 +609,8 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. 
How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -643,11 +645,11 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -719,9 +721,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat """ _attribute_map = { @@ -782,7 +784,8 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): :type type: str :param encoding: The different encoding formats that can be used for the image. Possible values include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". 
- :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :type encoding: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best quality). :type quality: str @@ -820,7 +823,8 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :type type: str :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + :type pixel_format: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -848,7 +852,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -883,9 +887,9 @@ class MediaGraphInstance(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -955,7 +959,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """Collection of graph instances. :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response). @@ -1136,10 +1140,11 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph Instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1177,7 +1182,7 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param instance: Required. Represents a Media Graph instance. - :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ _validation = { @@ -1219,9 +1224,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :param name: Required. name. 
:type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -1267,7 +1272,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param hub_output_name: Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1389,10 +1394,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :type sensitivity: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. 
Default is true. :type output_motion_region: bool @@ -1434,7 +1440,8 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + :type output_selectors: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1462,7 +1469,8 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". :vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1500,7 +1508,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. name. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str :param default: The default value for the parameter, to be used if the graph instance does not @@ -1613,9 +1621,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport :param endpoint: Required. 
RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint """ _validation = { @@ -1656,7 +1664,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events, before evaluating them. :type activation_evaluation_window: str @@ -1741,15 +1749,17 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :type trusted_certificates: + ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. 
- :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + :type validation_options: + ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1815,9 +1825,9 @@ class MediaGraphTopology(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -1848,7 +1858,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """Collection of graph topologies. :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response). @@ -1987,13 +1997,14 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :param description: An optional description for the instance. :type description: str :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] :param sources: An optional description for the instance. 
- :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] + :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ _attribute_map = { @@ -2034,7 +2045,7 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param graph: Required. Describes a graph topology. - :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ _validation = { @@ -2076,9 +2087,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -2121,7 +2132,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. 
:type url: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py deleted file mode 100644 index 5960c353a898..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py deleted file mode 100644 index 31ed98425268..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py +++ /dev/null @@ -1,9 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -VERSION = "1.0" diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py deleted file mode 100644 index 2e389ab8ef9d..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py +++ /dev/null @@ -1,199 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import ItemNonSetRequestBase - from ._models_py3 import MediaGraphAssetSink - from ._models_py3 import MediaGraphCertificateSource - from ._models_py3 import MediaGraphCognitiveServicesVisionExtension - from ._models_py3 import MediaGraphCredentials - from ._models_py3 import MediaGraphEndpoint - from ._models_py3 import MediaGraphExtensionProcessorBase - from ._models_py3 import MediaGraphFileSink - from ._models_py3 import MediaGraphFrameRateFilterProcessor - from ._models_py3 import MediaGraphGrpcExtension - from ._models_py3 import MediaGraphGrpcExtensionDataTransfer - from ._models_py3 import MediaGraphHttpExtension - from ._models_py3 import MediaGraphHttpHeaderCredentials - from ._models_py3 import MediaGraphImage - from ._models_py3 import MediaGraphImageFormat - from ._models_py3 import MediaGraphImageFormatEncoded - from ._models_py3 import MediaGraphImageFormatRaw - from ._models_py3 import MediaGraphImageScale - from ._models_py3 import MediaGraphInstance - from ._models_py3 import MediaGraphInstanceActivateRequest - from ._models_py3 import MediaGraphInstanceCollection - from ._models_py3 import MediaGraphInstanceDeActivateRequest - from ._models_py3 import MediaGraphInstanceDeleteRequest - from ._models_py3 import MediaGraphInstanceGetRequest - from ._models_py3 import MediaGraphInstanceListRequest - from ._models_py3 import MediaGraphInstanceProperties - from ._models_py3 import MediaGraphInstanceSetRequest - from ._models_py3 import MediaGraphInstanceSetRequestBody - from ._models_py3 import MediaGraphIoTHubMessageSink - from ._models_py3 import MediaGraphIoTHubMessageSource - from ._models_py3 import MediaGraphMotionDetectionProcessor - from 
._models_py3 import MediaGraphNodeInput - from ._models_py3 import MediaGraphOutputSelector - from ._models_py3 import MediaGraphParameterDeclaration - from ._models_py3 import MediaGraphParameterDefinition - from ._models_py3 import MediaGraphPemCertificateList - from ._models_py3 import MediaGraphProcessor - from ._models_py3 import MediaGraphRtspSource - from ._models_py3 import MediaGraphSignalGateProcessor - from ._models_py3 import MediaGraphSink - from ._models_py3 import MediaGraphSource - from ._models_py3 import MediaGraphSystemData - from ._models_py3 import MediaGraphTlsEndpoint - from ._models_py3 import MediaGraphTlsValidationOptions - from ._models_py3 import MediaGraphTopology - from ._models_py3 import MediaGraphTopologyCollection - from ._models_py3 import MediaGraphTopologyDeleteRequest - from ._models_py3 import MediaGraphTopologyGetRequest - from ._models_py3 import MediaGraphTopologyListRequest - from ._models_py3 import MediaGraphTopologyProperties - from ._models_py3 import MediaGraphTopologySetRequest - from ._models_py3 import MediaGraphTopologySetRequestBody - from ._models_py3 import MediaGraphUnsecuredEndpoint - from ._models_py3 import MediaGraphUsernamePasswordCredentials - from ._models_py3 import OperationBase -except (SyntaxError, ImportError): - from ._models import ItemNonSetRequestBase # type: ignore - from ._models import MediaGraphAssetSink # type: ignore - from ._models import MediaGraphCertificateSource # type: ignore - from ._models import MediaGraphCognitiveServicesVisionExtension # type: ignore - from ._models import MediaGraphCredentials # type: ignore - from ._models import MediaGraphEndpoint # type: ignore - from ._models import MediaGraphExtensionProcessorBase # type: ignore - from ._models import MediaGraphFileSink # type: ignore - from ._models import MediaGraphFrameRateFilterProcessor # type: ignore - from ._models import MediaGraphGrpcExtension # type: ignore - from ._models import 
MediaGraphGrpcExtensionDataTransfer # type: ignore - from ._models import MediaGraphHttpExtension # type: ignore - from ._models import MediaGraphHttpHeaderCredentials # type: ignore - from ._models import MediaGraphImage # type: ignore - from ._models import MediaGraphImageFormat # type: ignore - from ._models import MediaGraphImageFormatEncoded # type: ignore - from ._models import MediaGraphImageFormatRaw # type: ignore - from ._models import MediaGraphImageScale # type: ignore - from ._models import MediaGraphInstance # type: ignore - from ._models import MediaGraphInstanceActivateRequest # type: ignore - from ._models import MediaGraphInstanceCollection # type: ignore - from ._models import MediaGraphInstanceDeActivateRequest # type: ignore - from ._models import MediaGraphInstanceDeleteRequest # type: ignore - from ._models import MediaGraphInstanceGetRequest # type: ignore - from ._models import MediaGraphInstanceListRequest # type: ignore - from ._models import MediaGraphInstanceProperties # type: ignore - from ._models import MediaGraphInstanceSetRequest # type: ignore - from ._models import MediaGraphInstanceSetRequestBody # type: ignore - from ._models import MediaGraphIoTHubMessageSink # type: ignore - from ._models import MediaGraphIoTHubMessageSource # type: ignore - from ._models import MediaGraphMotionDetectionProcessor # type: ignore - from ._models import MediaGraphNodeInput # type: ignore - from ._models import MediaGraphOutputSelector # type: ignore - from ._models import MediaGraphParameterDeclaration # type: ignore - from ._models import MediaGraphParameterDefinition # type: ignore - from ._models import MediaGraphPemCertificateList # type: ignore - from ._models import MediaGraphProcessor # type: ignore - from ._models import MediaGraphRtspSource # type: ignore - from ._models import MediaGraphSignalGateProcessor # type: ignore - from ._models import MediaGraphSink # type: ignore - from ._models import MediaGraphSource # type: ignore - from 
._models import MediaGraphSystemData # type: ignore - from ._models import MediaGraphTlsEndpoint # type: ignore - from ._models import MediaGraphTlsValidationOptions # type: ignore - from ._models import MediaGraphTopology # type: ignore - from ._models import MediaGraphTopologyCollection # type: ignore - from ._models import MediaGraphTopologyDeleteRequest # type: ignore - from ._models import MediaGraphTopologyGetRequest # type: ignore - from ._models import MediaGraphTopologyListRequest # type: ignore - from ._models import MediaGraphTopologyProperties # type: ignore - from ._models import MediaGraphTopologySetRequest # type: ignore - from ._models import MediaGraphTopologySetRequestBody # type: ignore - from ._models import MediaGraphUnsecuredEndpoint # type: ignore - from ._models import MediaGraphUsernamePasswordCredentials # type: ignore - from ._models import OperationBase # type: ignore - -from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( - MediaGraphGrpcExtensionDataTransferMode, - MediaGraphImageEncodingFormat, - MediaGraphImageFormatRawPixelFormat, - MediaGraphImageScaleMode, - MediaGraphInstanceState, - MediaGraphMotionDetectionSensitivity, - MediaGraphOutputSelectorOperator, - MediaGraphParameterType, - MediaGraphRtspTransport, -) - -__all__ = [ - 'ItemNonSetRequestBase', - 'MediaGraphAssetSink', - 'MediaGraphCertificateSource', - 'MediaGraphCognitiveServicesVisionExtension', - 'MediaGraphCredentials', - 'MediaGraphEndpoint', - 'MediaGraphExtensionProcessorBase', - 'MediaGraphFileSink', - 'MediaGraphFrameRateFilterProcessor', - 'MediaGraphGrpcExtension', - 'MediaGraphGrpcExtensionDataTransfer', - 'MediaGraphHttpExtension', - 'MediaGraphHttpHeaderCredentials', - 'MediaGraphImage', - 'MediaGraphImageFormat', - 'MediaGraphImageFormatEncoded', - 'MediaGraphImageFormatRaw', - 'MediaGraphImageScale', - 'MediaGraphInstance', - 'MediaGraphInstanceActivateRequest', - 'MediaGraphInstanceCollection', - 'MediaGraphInstanceDeActivateRequest', - 
'MediaGraphInstanceDeleteRequest', - 'MediaGraphInstanceGetRequest', - 'MediaGraphInstanceListRequest', - 'MediaGraphInstanceProperties', - 'MediaGraphInstanceSetRequest', - 'MediaGraphInstanceSetRequestBody', - 'MediaGraphIoTHubMessageSink', - 'MediaGraphIoTHubMessageSource', - 'MediaGraphMotionDetectionProcessor', - 'MediaGraphNodeInput', - 'MediaGraphOutputSelector', - 'MediaGraphParameterDeclaration', - 'MediaGraphParameterDefinition', - 'MediaGraphPemCertificateList', - 'MediaGraphProcessor', - 'MediaGraphRtspSource', - 'MediaGraphSignalGateProcessor', - 'MediaGraphSink', - 'MediaGraphSource', - 'MediaGraphSystemData', - 'MediaGraphTlsEndpoint', - 'MediaGraphTlsValidationOptions', - 'MediaGraphTopology', - 'MediaGraphTopologyCollection', - 'MediaGraphTopologyDeleteRequest', - 'MediaGraphTopologyGetRequest', - 'MediaGraphTopologyListRequest', - 'MediaGraphTopologyProperties', - 'MediaGraphTopologySetRequest', - 'MediaGraphTopologySetRequestBody', - 'MediaGraphUnsecuredEndpoint', - 'MediaGraphUsernamePasswordCredentials', - 'OperationBase', - 'MediaGraphGrpcExtensionDataTransferMode', - 'MediaGraphImageEncodingFormat', - 'MediaGraphImageFormatRawPixelFormat', - 'MediaGraphImageScaleMode', - 'MediaGraphInstanceState', - 'MediaGraphMotionDetectionSensitivity', - 'MediaGraphOutputSelectorOperator', - 'MediaGraphParameterType', - 'MediaGraphRtspTransport', -] diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py deleted file mode 100644 index 6e78e4728244..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding=utf-8 -# 
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """How frame data should be transmitted to the inferencing engine. - """ - - EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. - SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. - -class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The different encoding formats that can be used for the image. - """ - - JPEG = "Jpeg" #: JPEG image format. - BMP = "Bmp" #: BMP image format. - PNG = "Png" #: PNG image format. - -class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """pixel format - """ - - YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). - RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. 
- RGB565_LE = "Rgb565le" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian. - RGB555_BE = "Rgb555be" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined. - RGB555_LE = "Rgb555le" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined. - RGB24 = "Rgb24" #: Packed RGB 8:8:8, 24bpp, RGBRGB. - BGR24 = "Bgr24" #: Packed RGB 8:8:8, 24bpp, BGRBGR. - ARGB = "Argb" #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB. - RGBA = "Rgba" #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA. - ABGR = "Abgr" #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR. - BGRA = "Bgra" #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA. - -class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the modes for scaling an input video frame into an image, before it is sent to an - inference engine. - """ - - PRESERVE_ASPECT_RATIO = "PreserveAspectRatio" #: Use the same aspect ratio as the input frame. - PAD = "Pad" #: Center pad the input frame to match the given dimensions. - STRETCH = "Stretch" #: Stretch input frame to match given dimensions. - -class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Allowed states for a graph Instance. - """ - - INACTIVE = "Inactive" #: Inactive state. - ACTIVATING = "Activating" #: Activating state. - ACTIVE = "Active" #: Active state. - DEACTIVATING = "Deactivating" #: Deactivating state. - -class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumeration that specifies the sensitivity of the motion detection processor. - """ - - LOW = "Low" #: Low Sensitivity. - MEDIUM = "Medium" #: Medium Sensitivity. - HIGH = "High" #: High Sensitivity. - -class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The operator to compare streams by. - """ - - IS_ENUM = "is" #: A media type is the same type or a subtype. 
- IS_NOT = "isNot" #: A media type is not the same type or a subtype. - -class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """name - """ - - STRING = "String" #: A string parameter value. - SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. - INT = "Int" #: A 32-bit signed integer as parameter value. - DOUBLE = "Double" #: A 64-bit double-precision floating point type as parameter value. - BOOL = "Bool" #: A boolean value that is either true or false. - -class MediaGraphRtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - """ - - HTTP = "Http" #: HTTP/HTTPS transport. This should be used when HTTP tunneling is desired. - TCP = "Tcp" #: TCP transport. This should be used when HTTP tunneling is NOT desired. diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py deleted file mode 100644 index 62f58c7ea385..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py +++ /dev/null @@ -1,2008 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import msrest.serialization - - -class OperationBase(msrest.serialization.Model): - """OperationBase. - - You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(OperationBase, self).__init__(**kwargs) - self.method_name = None # type: Optional[str] - - -class ItemNonSetRequestBase(OperationBase): - """ItemNonSetRequestBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. 
- :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(ItemNonSetRequestBase, self).__init__(**kwargs) - self.method_name = 'ItemNonSetRequestBase' # type: str - self.name = kwargs['name'] - - -class MediaGraphSink(msrest.serialization.Model): - """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSink, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - self.inputs = kwargs['inputs'] - - -class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. - :type asset_name_pattern: str - :param segment_length: When writing media to an asset, wait until at least this duration of - media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum - of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from - Azure. - :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. - :type local_media_cache_maximum_size_mi_b: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, - 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, - 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphAssetSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = kwargs.get('asset_name_pattern', None) - self.segment_length = kwargs.get('segment_length', None) - self.local_media_cache_path = kwargs.get('local_media_cache_path', None) - self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) - - -class MediaGraphCertificateSource(msrest.serialization.Model): - """Base class for certificate sources. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphPemCertificateList. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCertificateSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphProcessor(msrest.serialization.Model): - """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphProcessor, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - self.inputs = kwargs['inputs'] - - -class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = kwargs.get('endpoint', None) - self.image = kwargs.get('image', None) - - -class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str - - -class MediaGraphCredentials(msrest.serialization.Model): - """Credentials to present during authentication. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCredentials, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphEndpoint(msrest.serialization.Model): - """Base class for endpoints. - - You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphEndpoint, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.credentials = kwargs.get('credentials', None) - self.url = kwargs['url'] - - -class MediaGraphFileSink(MediaGraphSink): - """Enables a media graph to write/store media (video and audio) to a file on the Edge device. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. 
- :type file_path_pattern: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFileSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = kwargs['file_path_pattern'] - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. 
- :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = kwargs.get('maximum_fps', None) - - -class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - :param data_transfer: Required. How media should be transferred to the inferencing engine. 
- :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'data_transfer': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphGrpcExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str - self.data_transfer = kwargs['data_transfer'] - - -class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. - - All required parameters must be populated in order to send to Azure. - - :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. - :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible - values include: "Embedded", "SharedMemory". 
- :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) - self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) - self.mode = kwargs['mode'] - - -class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphHttpExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str - - -class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): - """Http header service credentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. - :type header_value: str - """ - - _validation = { - 'type': {'required': True}, - 'header_name': {'required': True}, - 'header_value': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'header_name': {'key': 'headerName', 'type': 'str'}, - 'header_value': {'key': 'headerValue', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str - self.header_name = kwargs['header_name'] - self.header_value = kwargs['header_value'] - - -class MediaGraphImage(msrest.serialization.Model): - """Describes the properties of an image frame. - - :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale - :param format: Encoding settings for an image. 
- :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat - """ - - _attribute_map = { - 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, - 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImage, self).__init__(**kwargs) - self.scale = kwargs.get('scale', None) - self.format = kwargs.get('format', None) - - -class MediaGraphImageFormat(msrest.serialization.Model): - """Encoding settings for an image. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormat, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). 
- :type quality: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = kwargs.get('encoding', "Jpeg") - self.quality = kwargs.get('quality', None) - - -class MediaGraphImageFormatRaw(MediaGraphImageFormat): - """Encoding settings for raw images. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormatRaw, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = kwargs.get('pixel_format', None) - - -class MediaGraphImageScale(msrest.serialization.Model): - """The scaling mode for the image. - - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode - :param width: The desired output width of the image. - :type width: str - :param height: The desired output height of the image. 
- :type height: str - """ - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = kwargs.get('mode', None) - self.width = kwargs.get('width', None) - self.height = kwargs.get('height', None) - - -class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstance, self).__init__(**kwargs) - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceActivateRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceActivate' # type: str - - -class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. - - :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). - :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceCollection, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - self.continuation_token = kwargs.get('continuation_token', None) - - -class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceDeActivateRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceDeactivate' # type: str - - -class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceDeleteRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceDelete' # type: str - - -class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): - """MediaGraphInstanceGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. 
method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceGetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceGet' # type: str - - -class MediaGraphInstanceListRequest(OperationBase): - """MediaGraphInstanceListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceListRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceList' # type: str - - -class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. - - :param description: An optional description for the instance. - :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. - :type topology_name: str - :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. Possible values include: "Inactive", - "Activating", "Active", "Deactivating". 
- :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'topology_name': {'key': 'topologyName', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, - 'state': {'key': 'state', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceProperties, self).__init__(**kwargs) - self.description = kwargs.get('description', None) - self.topology_name = kwargs.get('topology_name', None) - self.parameters = kwargs.get('parameters', None) - self.state = kwargs.get('state', None) - - -class MediaGraphInstanceSetRequest(OperationBase): - """MediaGraphInstanceSetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. - :type instance: ~azure.media.lva.edge.models.MediaGraphInstance - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'instance': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceSet' # type: str - self.instance = kwargs['instance'] - - -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): - """MediaGraphInstanceSetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. 
- - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceSetRequestBody, self).__init__(**kwargs) - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. 
An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. - :type hub_output_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = kwargs.get('hub_output_name', None) - - -class MediaGraphSource(msrest.serialization.Model): - """Media graph source. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. 
- :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - - -class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param hub_input_name: Name of the input path where messages can be routed to (via routes - declared in the IoT Edge deployment manifest). - :type hub_input_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphIoTHubMessageSource, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str - self.hub_input_name = kwargs.get('hub_input_name', None) - - -class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): - """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. 
- - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param sensitivity: Enumeration that specifies the sensitivity of the motion detection - processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the - regions, within the video frame, where motion was detected. Default is true. - :type output_motion_region: bool - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, - 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphMotionDetectionProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str - self.sensitivity = kwargs.get('sensitivity', None) - self.output_motion_region = kwargs.get('output_motion_region', None) - - -class MediaGraphNodeInput(msrest.serialization.Model): - """Represents the input to any node in a media graph. - - :param node_name: The name of another node in the media graph, the output of which is used as - input to this node. 
- :type node_name: str - :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] - """ - - _attribute_map = { - 'node_name': {'key': 'nodeName', 'type': 'str'}, - 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphNodeInput, self).__init__(**kwargs) - self.node_name = kwargs.get('node_name', None) - self.output_selectors = kwargs.get('output_selectors', None) - - -class MediaGraphOutputSelector(msrest.serialization.Model): - """Allows for the selection of particular streams from another node. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar property: The stream property to compare with. Default value: "mediaType". - :vartype property: str - :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator - :param value: Value to compare against. - :type value: str - """ - - _validation = { - 'property': {'constant': True}, - } - - _attribute_map = { - 'property': {'key': 'property', 'type': 'str'}, - 'operator': {'key': 'operator', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - property = "mediaType" - - def __init__( - self, - **kwargs - ): - super(MediaGraphOutputSelector, self).__init__(**kwargs) - self.operator = kwargs.get('operator', None) - self.value = kwargs.get('value', None) - - -class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the parameter. - :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. - :type default: str - """ - - _validation = { - 'name': {'required': True, 'max_length': 64, 'min_length': 0}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'default': {'key': 'default', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphParameterDeclaration, self).__init__(**kwargs) - self.name = kwargs['name'] - self.type = kwargs['type'] - self.description = kwargs.get('description', None) - self.default = kwargs.get('default', None) - - -class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. Name of parameter as defined in the graph topology. - :type name: str - :param value: Required. Value of parameter. 
- :type value: str - """ - - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphParameterDefinition, self).__init__(**kwargs) - self.name = kwargs['name'] - self.value = kwargs['value'] - - -class MediaGraphPemCertificateList(MediaGraphCertificateSource): - """A list of PEM formatted certificates. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates one per entry. - :type certificates: list[str] - """ - - _validation = { - 'type': {'required': True}, - 'certificates': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'certificates': {'key': 'certificates', 'type': '[str]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphPemCertificateList, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str - self.certificates = kwargs['certificates'] - - -class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport - :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'endpoint': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'transport': {'key': 'transport', 'type': 'str'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphRtspSource, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str - self.transport = kwargs.get('transport', None) - self.endpoint = kwargs['endpoint'] - - -class MediaGraphSignalGateProcessor(MediaGraphProcessor): - """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. - :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. 
- :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). - :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. - :type maximum_activation_time: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, - 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, - 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, - 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str - self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) - self.activation_signal_offset = kwargs.get('activation_signal_offset', None) - self.minimum_activation_time = kwargs.get('minimum_activation_time', None) - self.maximum_activation_time = kwargs.get('maximum_activation_time', None) - - -class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. - - :param created_at: The timestamp of resource creation (UTC). - :type created_at: ~datetime.datetime - :param last_modified_at: The timestamp of resource last modification (UTC). 
- :type last_modified_at: ~datetime.datetime - """ - - _attribute_map = { - 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, - 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSystemData, self).__init__(**kwargs) - self.created_at = kwargs.get('created_at', None) - self.last_modified_at = kwargs.get('last_modified_at', None) - - -class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null - designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource - :param validation_options: Validation options to use when authenticating a TLS connection. By - default, strict validation is used. 
- :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, - 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTlsEndpoint, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str - self.trusted_certificates = kwargs.get('trusted_certificates', None) - self.validation_options = kwargs.get('validation_options', None) - - -class MediaGraphTlsValidationOptions(msrest.serialization.Model): - """Options for controlling the authentication of TLS endpoints. - - :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. - :type ignore_hostname: str - :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the - current time. - :type ignore_signature: str - """ - - _attribute_map = { - 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, - 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) - self.ignore_hostname = kwargs.get('ignore_hostname', None) - self.ignore_signature = kwargs.get('ignore_signature', None) - - -class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. 
- :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopology, self).__init__(**kwargs) - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. - - :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). - :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyCollection, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - self.continuation_token = kwargs.get('continuation_token', None) - - -class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): - """MediaGraphTopologyDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyDeleteRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyDelete' # type: str - - -class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): - """MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyGetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyGet' # type: str - - -class MediaGraphTopologyListRequest(OperationBase): - """MediaGraphTopologyListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyListRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyList' # type: str - - -class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. - - :param description: An optional description for the instance. - :type description: str - :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] - :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, - 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, - 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, - 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyProperties, self).__init__(**kwargs) - self.description = kwargs.get('description', None) - self.parameters = kwargs.get('parameters', None) - self.sources = kwargs.get('sources', None) - self.processors = kwargs.get('processors', None) - self.sinks = kwargs.get('sinks', None) - - -class MediaGraphTopologySetRequest(OperationBase): - """MediaGraphTopologySetRequest. 
- - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param graph: Required. Describes a graph topology. - :type graph: ~azure.media.lva.edge.models.MediaGraphTopology - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'graph': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologySetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologySet' # type: str - self.graph = kwargs['graph'] - - -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): - """MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologySetRequestBody, self).__init__(**kwargs) - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): - """An endpoint that the media graph can connect to, with no encryption in transit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. 
- :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphUnsecuredEndpoint, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str - - -class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): - """Username/password credential pair. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param username: Required. Username for a username/password pair. - :type username: str - :param password: Password for a username/password pair. - :type password: str - """ - - _validation = { - 'type': {'required': True}, - 'username': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'username': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str - self.username = kwargs['username'] - self.password = kwargs.get('password', None) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py deleted file mode 100644 index 5de3adde8e11..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py +++ /dev/null @@ -1,2185 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) 
Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import List, Optional, Union - -import msrest.serialization - -from ._definitionsfor_live_video_analyticson_io_tedge_enums import * - - -class OperationBase(msrest.serialization.Model): - """OperationBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(OperationBase, self).__init__(**kwargs) - self.method_name = None # type: Optional[str] - - -class ItemNonSetRequestBase(OperationBase): - """ItemNonSetRequestBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(ItemNonSetRequestBase, self).__init__(**kwargs) - self.method_name = 'ItemNonSetRequestBase' # type: str - self.name = name - - -class MediaGraphSink(msrest.serialization.Model): - """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - **kwargs - ): - super(MediaGraphSink, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - self.inputs = inputs - - -class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. - :type asset_name_pattern: str - :param segment_length: When writing media to an asset, wait until at least this duration of - media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum - of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from - Azure. - :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. - :type local_media_cache_maximum_size_mi_b: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, - 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, - 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - asset_name_pattern: Optional[str] = None, - segment_length: Optional[datetime.timedelta] = None, - local_media_cache_path: Optional[str] = None, - local_media_cache_maximum_size_mi_b: Optional[str] = None, - **kwargs - ): - super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = asset_name_pattern - self.segment_length = segment_length - self.local_media_cache_path = local_media_cache_path - self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b - - -class MediaGraphCertificateSource(msrest.serialization.Model): - """Base class for certificate sources. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphPemCertificateList. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCertificateSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphProcessor(msrest.serialization.Model): - """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - **kwargs - ): - super(MediaGraphProcessor, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - self.inputs = inputs - - -class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = endpoint - self.image = image - - -class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. 
- :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str - - -class MediaGraphCredentials(msrest.serialization.Model): - """Credentials to present during authentication. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCredentials, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphEndpoint(msrest.serialization.Model): - """Base class for endpoints. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. 
- :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - **kwargs - ): - super(MediaGraphEndpoint, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.credentials = credentials - self.url = url - - -class MediaGraphFileSink(MediaGraphSink): - """Enables a media graph to write/store media (video and audio) to a file on the Edge device. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. 
- :type file_path_pattern: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - file_path_pattern: str, - **kwargs - ): - super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = file_path_pattern - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. 
- :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - maximum_fps: Optional[str] = None, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = maximum_fps - - -class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - :param data_transfer: Required. How media should be transferred to the inferencing engine. 
- :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'data_transfer': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - data_transfer: "MediaGraphGrpcExtensionDataTransfer", - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str - self.data_transfer = data_transfer - - -class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. - - All required parameters must be populated in order to send to Azure. - - :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. - :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible - values include: "Embedded", "SharedMemory". 
- :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__( - self, - *, - mode: Union[str, "MediaGraphGrpcExtensionDataTransferMode"], - shared_memory_size_mi_b: Optional[str] = None, - **kwargs - ): - super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) - self.shared_memory_size_mi_b = shared_memory_size_mi_b - self.mode = mode - - -class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str - - -class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): - """Http header service credentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. 
- :type header_value: str - """ - - _validation = { - 'type': {'required': True}, - 'header_name': {'required': True}, - 'header_value': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'header_name': {'key': 'headerName', 'type': 'str'}, - 'header_value': {'key': 'headerValue', 'type': 'str'}, - } - - def __init__( - self, - *, - header_name: str, - header_value: str, - **kwargs - ): - super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str - self.header_name = header_name - self.header_value = header_value - - -class MediaGraphImage(msrest.serialization.Model): - """Describes the properties of an image frame. - - :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale - :param format: Encoding settings for an image. - :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat - """ - - _attribute_map = { - 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, - 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, - } - - def __init__( - self, - *, - scale: Optional["MediaGraphImageScale"] = None, - format: Optional["MediaGraphImageFormat"] = None, - **kwargs - ): - super(MediaGraphImage, self).__init__(**kwargs) - self.scale = scale - self.format = format - - -class MediaGraphImageFormat(msrest.serialization.Model): - """Encoding settings for an image. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormat, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). - :type quality: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'str'}, - } - - def __init__( - self, - *, - encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", - quality: Optional[str] = None, - **kwargs - ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = encoding - self.quality = quality - - -class MediaGraphImageFormatRaw(MediaGraphImageFormat): - """Encoding settings for raw images. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, - } - - def __init__( - self, - *, - pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, - **kwargs - ): - super(MediaGraphImageFormatRaw, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = pixel_format - - -class MediaGraphImageScale(msrest.serialization.Model): - """The scaling mode for the image. - - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode - :param width: The desired output width of the image. - :type width: str - :param height: The desired output height of the image. - :type height: str - """ - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__( - self, - *, - mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, - width: Optional[str] = None, - height: Optional[str] = None, - **kwargs - ): - super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = mode - self.width = width - self.height = height - - -class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. 
- :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphInstanceProperties"] = None, - **kwargs - ): - super(MediaGraphInstance, self).__init__(**kwargs) - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceActivateRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceActivate' # type: str - - -class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. - - :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). - :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - *, - value: Optional[List["MediaGraphInstance"]] = None, - continuation_token: Optional[str] = None, - **kwargs - ): - super(MediaGraphInstanceCollection, self).__init__(**kwargs) - self.value = value - self.continuation_token = continuation_token - - -class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceDeActivateRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceDeactivate' # type: str - - -class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceDeleteRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceDelete' # type: str - - -class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): - """MediaGraphInstanceGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceGetRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceGet' # type: str - - -class MediaGraphInstanceListRequest(OperationBase): - """MediaGraphInstanceListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceListRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceList' # type: str - - -class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. - - :param description: An optional description for the instance. - :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. - :type topology_name: str - :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. 
Possible values include: "Inactive", - "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'topology_name': {'key': 'topologyName', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, - 'state': {'key': 'state', 'type': 'str'}, - } - - def __init__( - self, - *, - description: Optional[str] = None, - topology_name: Optional[str] = None, - parameters: Optional[List["MediaGraphParameterDefinition"]] = None, - state: Optional[Union[str, "MediaGraphInstanceState"]] = None, - **kwargs - ): - super(MediaGraphInstanceProperties, self).__init__(**kwargs) - self.description = description - self.topology_name = topology_name - self.parameters = parameters - self.state = state - - -class MediaGraphInstanceSetRequest(OperationBase): - """MediaGraphInstanceSetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. 
- :type instance: ~azure.media.lva.edge.models.MediaGraphInstance - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'instance': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - instance: "MediaGraphInstance", - **kwargs - ): - super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceSet' # type: str - self.instance = instance - - -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): - """MediaGraphInstanceSetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphInstanceProperties"] = None, - **kwargs - ): - super(MediaGraphInstanceSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. 
- :type hub_output_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - hub_output_name: Optional[str] = None, - **kwargs - ): - super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = hub_output_name - - -class MediaGraphSource(msrest.serialization.Model): - """Media graph source. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - - -class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. 
- - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param hub_input_name: Name of the input path where messages can be routed to (via routes - declared in the IoT Edge deployment manifest). - :type hub_input_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - hub_input_name: Optional[str] = None, - **kwargs - ): - super(MediaGraphIoTHubMessageSource, self).__init__(name=name, **kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str - self.hub_input_name = hub_input_name - - -class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): - """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param sensitivity: Enumeration that specifies the sensitivity of the motion detection - processor. Possible values include: "Low", "Medium", "High". 
- :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the - regions, within the video frame, where motion was detected. Default is true. - :type output_motion_region: bool - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, - 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, - output_motion_region: Optional[bool] = None, - **kwargs - ): - super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str - self.sensitivity = sensitivity - self.output_motion_region = output_motion_region - - -class MediaGraphNodeInput(msrest.serialization.Model): - """Represents the input to any node in a media graph. - - :param node_name: The name of another node in the media graph, the output of which is used as - input to this node. - :type node_name: str - :param output_selectors: Allows for the selection of particular streams from another node. 
- :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] - """ - - _attribute_map = { - 'node_name': {'key': 'nodeName', 'type': 'str'}, - 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, - } - - def __init__( - self, - *, - node_name: Optional[str] = None, - output_selectors: Optional[List["MediaGraphOutputSelector"]] = None, - **kwargs - ): - super(MediaGraphNodeInput, self).__init__(**kwargs) - self.node_name = node_name - self.output_selectors = output_selectors - - -class MediaGraphOutputSelector(msrest.serialization.Model): - """Allows for the selection of particular streams from another node. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar property: The stream property to compare with. Default value: "mediaType". - :vartype property: str - :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator - :param value: Value to compare against. - :type value: str - """ - - _validation = { - 'property': {'constant': True}, - } - - _attribute_map = { - 'property': {'key': 'property', 'type': 'str'}, - 'operator': {'key': 'operator', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - property = "mediaType" - - def __init__( - self, - *, - operator: Optional[Union[str, "MediaGraphOutputSelectorOperator"]] = None, - value: Optional[str] = None, - **kwargs - ): - super(MediaGraphOutputSelector, self).__init__(**kwargs) - self.operator = operator - self.value = value - - -class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the parameter. - :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. - :type default: str - """ - - _validation = { - 'name': {'required': True, 'max_length': 64, 'min_length': 0}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'default': {'key': 'default', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "MediaGraphParameterType"], - description: Optional[str] = None, - default: Optional[str] = None, - **kwargs - ): - super(MediaGraphParameterDeclaration, self).__init__(**kwargs) - self.name = name - self.type = type - self.description = description - self.default = default - - -class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. Name of parameter as defined in the graph topology. - :type name: str - :param value: Required. Value of parameter. 
- :type value: str - """ - - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - value: str, - **kwargs - ): - super(MediaGraphParameterDefinition, self).__init__(**kwargs) - self.name = name - self.value = value - - -class MediaGraphPemCertificateList(MediaGraphCertificateSource): - """A list of PEM formatted certificates. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates one per entry. - :type certificates: list[str] - """ - - _validation = { - 'type': {'required': True}, - 'certificates': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'certificates': {'key': 'certificates', 'type': '[str]'}, - } - - def __init__( - self, - *, - certificates: List[str], - **kwargs - ): - super(MediaGraphPemCertificateList, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str - self.certificates = certificates - - -class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport - :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'endpoint': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'transport': {'key': 'transport', 'type': 'str'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - } - - def __init__( - self, - *, - name: str, - endpoint: "MediaGraphEndpoint", - transport: Optional[Union[str, "MediaGraphRtspTransport"]] = None, - **kwargs - ): - super(MediaGraphRtspSource, self).__init__(name=name, **kwargs) - self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str - self.transport = transport - self.endpoint = endpoint - - -class MediaGraphSignalGateProcessor(MediaGraphProcessor): - """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. - :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. 
- :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). - :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. - :type maximum_activation_time: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, - 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, - 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, - 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - activation_evaluation_window: Optional[str] = None, - activation_signal_offset: Optional[str] = None, - minimum_activation_time: Optional[str] = None, - maximum_activation_time: Optional[str] = None, - **kwargs - ): - super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str - self.activation_evaluation_window = activation_evaluation_window - self.activation_signal_offset = activation_signal_offset - self.minimum_activation_time = minimum_activation_time - self.maximum_activation_time = maximum_activation_time - - -class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. - - :param created_at: The timestamp of resource creation (UTC). - :type created_at: ~datetime.datetime - :param last_modified_at: The timestamp of resource last modification (UTC). 
- :type last_modified_at: ~datetime.datetime - """ - - _attribute_map = { - 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, - 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, - } - - def __init__( - self, - *, - created_at: Optional[datetime.datetime] = None, - last_modified_at: Optional[datetime.datetime] = None, - **kwargs - ): - super(MediaGraphSystemData, self).__init__(**kwargs) - self.created_at = created_at - self.last_modified_at = last_modified_at - - -class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null - designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource - :param validation_options: Validation options to use when authenticating a TLS connection. By - default, strict validation is used. 
- :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, - 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - trusted_certificates: Optional["MediaGraphCertificateSource"] = None, - validation_options: Optional["MediaGraphTlsValidationOptions"] = None, - **kwargs - ): - super(MediaGraphTlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) - self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str - self.trusted_certificates = trusted_certificates - self.validation_options = validation_options - - -class MediaGraphTlsValidationOptions(msrest.serialization.Model): - """Options for controlling the authentication of TLS endpoints. - - :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. - :type ignore_hostname: str - :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the - current time. 
- :type ignore_signature: str - """ - - _attribute_map = { - 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, - 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, - } - - def __init__( - self, - *, - ignore_hostname: Optional[str] = None, - ignore_signature: Optional[str] = None, - **kwargs - ): - super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) - self.ignore_hostname = ignore_hostname - self.ignore_signature = ignore_signature - - -class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphTopologyProperties"] = None, - **kwargs - ): - super(MediaGraphTopology, self).__init__(**kwargs) - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. - - :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). 
- :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - *, - value: Optional[List["MediaGraphTopology"]] = None, - continuation_token: Optional[str] = None, - **kwargs - ): - super(MediaGraphTopologyCollection, self).__init__(**kwargs) - self.value = value - self.continuation_token = continuation_token - - -class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): - """MediaGraphTopologyDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphTopologyDeleteRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphTopologyDelete' # type: str - - -class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): - """MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphTopologyGetRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphTopologyGet' # type: str - - -class MediaGraphTopologyListRequest(OperationBase): - """MediaGraphTopologyListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyListRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyList' # type: str - - -class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. - - :param description: An optional description for the instance. - :type description: str - :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] - :param sinks: name. 
- :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, - 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, - 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, - 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, - } - - def __init__( - self, - *, - description: Optional[str] = None, - parameters: Optional[List["MediaGraphParameterDeclaration"]] = None, - sources: Optional[List["MediaGraphSource"]] = None, - processors: Optional[List["MediaGraphProcessor"]] = None, - sinks: Optional[List["MediaGraphSink"]] = None, - **kwargs - ): - super(MediaGraphTopologyProperties, self).__init__(**kwargs) - self.description = description - self.parameters = parameters - self.sources = sources - self.processors = processors - self.sinks = sinks - - -class MediaGraphTopologySetRequest(OperationBase): - """MediaGraphTopologySetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param graph: Required. Describes a graph topology. 
- :type graph: ~azure.media.lva.edge.models.MediaGraphTopology - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'graph': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - graph: "MediaGraphTopology", - **kwargs - ): - super(MediaGraphTopologySetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologySet' # type: str - self.graph = graph - - -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): - """MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphTopologyProperties"] = None, - **kwargs - ): - super(MediaGraphTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): - """An endpoint that the media graph can connect to, with no encryption in transit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. 
- :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - **kwargs - ): - super(MediaGraphUnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) - self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str - - -class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): - """Username/password credential pair. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param username: Required. Username for a username/password pair. - :type username: str - :param password: Password for a username/password pair. 
- :type password: str - """ - - _validation = { - 'type': {'required': True}, - 'username': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'username': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__( - self, - *, - username: str, - password: Optional[str] = None, - **kwargs - ): - super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str - self.username = username - self.password = password diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed deleted file mode 100644 index e5aff4f83af8..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index d318650fa662..8b602ac52507 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -11,8 +11,8 @@ autorest --v3 --python ```yaml require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md -output-folder: ../azure/media/lva/edge/_generated -namespace: azure.media.lva.edge +output-folder: ../azure/media/livevideoanalytics/edge/_generated +namespace: azure.media.livevideoanalytics.edge no-namespace-folders: true license-header: MICROSOFT_MIT_NO_VERSION enable-xml: false From 8c46e0262b4534a9f72e558386ca818939fbef26 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 09:32:15 -0800 Subject: [PATCH 19/64] missed more namespace changes --- .../azure/media/livevideoanalytics/edge/__init__.py | 2 
+- .../azure-media-livevideoanalytics-edge/samples/sample_lva.py | 2 +- sdk/media/ci.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py index 2a9c3cc68e52..17fe4565d648 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py @@ -1,5 +1,5 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -from azure.media.lva.edge._generated.models import (MediaGraphTopologySetRequestBody, +from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) def _OverrideTopologySetRequestSerialize(self): diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py index 46a5d64d3c39..c89397b9c30a 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py @@ -1,7 +1,7 @@ import json import os -from azure.media.lva.edge._generated.models import * +from azure.media.livevideoanalytics.edge._generated.models import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 2d63019f2b80..a1e4e391ec07 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,6 +30,6 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia - - name: azure_media_lva_edge - safeName: azuremedialvaedge + - name: 
azure_media_livevideoanalytics_edge + safeName: azuremedialivevideoanalyticsedge From 250b3936dd36ca235161af0e39f7db0fab96b9cf Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 10:11:19 -0800 Subject: [PATCH 20/64] changes based off PR comments --- .../CHANGELOG.md | 4 +- .../README.md | 4 +- .../media/livevideoanalytics/edge/_version.py | 2 +- .../dev_requirements.txt | 2 +- .../samples/sample_conditional_async.py | 48 ------ .../samples/sample_hello_world.py | 35 ---- .../setup.py | 7 +- .../swagger/autorest.md | 2 +- .../swagger/commandOutput.txt | 158 ------------------ 9 files changed, 9 insertions(+), 253 deletions(-) delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt diff --git a/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md b/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md index 816f21db092e..ab5c55e39865 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md @@ -3,6 +3,6 @@ ------------------- -## 0.0.1 (Unreleased) +## 1.0.0b1 (Unreleased) -- Training day! 
+Initial release diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index 5e665397682c..4ec628ab28e9 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -7,7 +7,7 @@ Use the client library for Live Video Analytics on IoT Edge to: - Simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) - Programatically construct media graph topologies and instances -[Package (PyPi)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] +[Package (PyPI)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] ## Getting started @@ -16,7 +16,7 @@ Use the client library for Live Video Analytics on IoT Edge to: Install the Live Video Analytics client library for Python with pip: ```bash -pip install azure-media-livevideoanalytics--edge +pip install azure-media-livevideoanalytics-edge ``` ### Prerequisites diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py index f95f18986f48..6a6e5effdb40 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py @@ -4,4 +4,4 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------- -VERSION = '0.0.1' +VERSION = '1.0.0b1' diff --git a/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt b/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt index c3cf063e6b31..97e51db43ae3 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt +++ b/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt @@ -1,6 +1,6 @@ -../../core/azure-core -e ../../../tools/azure-devtools -e ../../../tools/azure-sdk-tools +../../core/azure-core -e ../../identity/azure-identity aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py deleted file mode 100644 index c894b9b71a09..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding: utf-8 - -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import asyncio -import os -from colorama import init, Style, Fore -init() - -from azure.identity.aio import DefaultAzureCredential -from azure.learnappconfig.aio import AppConfigurationClient -from azure.core.exceptions import ResourceNotFoundError, ResourceNotModifiedError -from azure.core import MatchConditions - - -async def main(): - url = os.environ.get('API-LEARN_ENDPOINT') - credential = DefaultAzureCredential() - async with AppConfigurationClient(account_url=url, credential=credential) as client: - - # Retrieve initial color value - try: - first_color = await client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) - except ResourceNotFoundError: - raise - - # Get latest color value, only if it has changed - try: - new_color = await client.get_configuration_setting( - key=os.environ['API-LEARN_SETTING_COLOR_KEY'], - match_condition=MatchConditions.IfModified, - etag=first_color.etag - ) - except ResourceNotModifiedError: - new_color = first_color - - color = getattr(Fore, new_color.value.upper()) - greeting = 'Hello!' - print(f'{color}{greeting}{Style.RESET_ALL}') - - -if __name__ == "__main__": - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py deleted file mode 100644 index f6fa6e0686fd..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 - -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import os -from colorama import init, Style, Fore -init() - -from azure.identity import DefaultAzureCredential -from azure.learnappconfig import AppConfigurationClient - -def main(): - url = os.environ.get('API-LEARN_ENDPOINT') - credential = DefaultAzureCredential() - client = AppConfigurationClient(account_url=url, credential=credential) - - try: - color_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) - color = color_setting.value.upper() - text_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_TEXT_KEY']) - greeting = text_setting.value - except: - color = 'RED' - greeting = 'Default greeting' - - color = getattr(Fore, color) - print(f'{color}{greeting}{Style.RESET_ALL}') - - -if __name__ == "__main__": - main() diff --git a/sdk/media/azure-media-livevideoanalytics-edge/setup.py b/sdk/media/azure-media-livevideoanalytics-edge/setup.py index 324e31db3312..e1f1f3a85b11 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/setup.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/setup.py @@ -73,7 +73,7 @@ author_email='azpysdkhelp@microsoft.com', url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration', classifiers=[ - "Development Status :: 5 - Production/Stable", + "Development Status :: 4 - Beta", 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', @@ -82,6 +82,7 @@ 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'License :: OSI Approved :: MIT License', ], zip_safe=False, @@ -94,9 +95,5 @@ ":python_version<'3.0'": ['azure-nspkg'], ":python_version<'3.4'": ['enum34>=1.0.4'], ":python_version<'3.5'": ['typing'], - "async:python_version>='3.5'": [ - 'aiohttp>=3.0', - 'aiodns>=2.0' - ], } ) 
\ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index 8b602ac52507..03aa0ca72f85 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -1,4 +1,4 @@ -# Azure Queue Storage for Python +# Generate SDK using Autorest see `https://aka.ms/autorest` diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt b/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt deleted file mode 100644 index 0290e6671f32..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt +++ /dev/null @@ -1,158 +0,0 @@ -AutoRest code generation utility [cli version: 3.0.6247; node: v12.16.1, max-memory: 2048 gb] -(C) 2018 Microsoft Corporation. -https://aka.ms/autorest -NOTE: AutoRest core version selected from configuration: 3.0.6302. - Loading AutoRest core 'C:\Users\hivyas\.autorest\@autorest_core@3.0.6302\node_modules\@autorest\core\dist' (3.0.6302) - Loading AutoRest extension '@autorest/python' (5.1.0-preview.7->5.1.0-preview.7) - Loading AutoRest extension '@autorest/modelerfour' (4.15.400->4.15.400) - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyListRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceListRequest' is using an 'allOf' instead of a $ref. 
This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphUnsecuredEndpoint' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphCognitiveServicesVisionExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphHttpExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphInstanceCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTopologyCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphRtspSource' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUsernamePasswordCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpHeaderCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUnsecuredEndpoint' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTlsEndpoint' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphPemCertificateList' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphOutputSelector' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFileSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphAssetSink' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphMotionDetectionProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphExtensionProcessorBase' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphCognitiveServicesVisionExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtensionDataTransfer' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpExtension' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatRaw' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatEncoded' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphSignalGateProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFrameRateFilterProcessor' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphRtspSource' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSource' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphUsernamePasswordCredentials' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphHttpHeaderCredentials' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphTlsEndpoint' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphPemCertificateList' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFileSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphAssetSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphMotionDetectionProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphExtensionProcessorBase' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphGrpcExtension' has an allOf list with an empty object schema as a parent, removing it. 
- -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatRaw' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatEncoded' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphSignalGateProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFrameRateFilterProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/CheckDuplicateSchemas): Checking for duplicate schemas, this could take a (long) while. Run with --verbose for more detail. - -WARNING (Modeler/MissingType): The schema 'components·109p5kc·schemas·mediagraphrtspsource·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·109p5kc·schemas·mediagraphrtspsource·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1' has no type or format information whatsoever. 
Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1nh92cj·schemas·mediagraphfilesink·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1nh92cj·schemas·mediagraphfilesink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·z5bgs5·schemas·mediagraphassetsink·allof·1' has no type or format information whatsoever. 
Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·z5bgs5·schemas·mediagraphassetsink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1k6pka5·schemas·mediagraphimageformatraw·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1k6pka5·schemas·mediagraphimageformatraw·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1' has no type or format information whatsoever. 
Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1 -Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? 
\ No newline at end of file From 46e4fa54acbedd777e6104e65eebd61aef9e5e56 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 4 Dec 2020 08:59:04 -0800 Subject: [PATCH 21/64] sample changes and removing hardcoded strings --- .../edge/_generated/models/__init__.py | 28 +- ..._live_video_analyticson_io_tedge_enums.py} | 22 +- .../edge/_generated/models/_models.py | 486 +++++++++------- .../edge/_generated/models/_models_py3.py | 526 +++++++++++------- .../samples/sample_lva.py | 52 +- .../swagger/autorest.md | 2 +- 6 files changed, 656 insertions(+), 460 deletions(-) rename sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/{_definitionsfor_live_video_analyticson_io_tedge_enums.py => _direct_methodsfor_live_video_analyticson_io_tedge_enums.py} (88%) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py index 2e389ab8ef9d..2a0c95b5d2f0 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py @@ -15,14 +15,15 @@ from ._models_py3 import MediaGraphEndpoint from ._models_py3 import MediaGraphExtensionProcessorBase from ._models_py3 import MediaGraphFileSink - from ._models_py3 import MediaGraphFrameRateFilterProcessor from ._models_py3 import MediaGraphGrpcExtension from ._models_py3 import MediaGraphGrpcExtensionDataTransfer from ._models_py3 import MediaGraphHttpExtension from ._models_py3 import MediaGraphHttpHeaderCredentials from ._models_py3 import MediaGraphImage from ._models_py3 import MediaGraphImageFormat - from ._models_py3 import MediaGraphImageFormatEncoded + from ._models_py3 import MediaGraphImageFormatBmp + from ._models_py3 import 
MediaGraphImageFormatJpeg + from ._models_py3 import MediaGraphImageFormatPng from ._models_py3 import MediaGraphImageFormatRaw from ._models_py3 import MediaGraphImageScale from ._models_py3 import MediaGraphInstance @@ -45,6 +46,7 @@ from ._models_py3 import MediaGraphPemCertificateList from ._models_py3 import MediaGraphProcessor from ._models_py3 import MediaGraphRtspSource + from ._models_py3 import MediaGraphSamplingOptions from ._models_py3 import MediaGraphSignalGateProcessor from ._models_py3 import MediaGraphSink from ._models_py3 import MediaGraphSource @@ -61,7 +63,7 @@ from ._models_py3 import MediaGraphTopologySetRequestBody from ._models_py3 import MediaGraphUnsecuredEndpoint from ._models_py3 import MediaGraphUsernamePasswordCredentials - from ._models_py3 import OperationBase + from ._models_py3 import MethodRequest except (SyntaxError, ImportError): from ._models import ItemNonSetRequestBase # type: ignore from ._models import MediaGraphAssetSink # type: ignore @@ -71,14 +73,15 @@ from ._models import MediaGraphEndpoint # type: ignore from ._models import MediaGraphExtensionProcessorBase # type: ignore from ._models import MediaGraphFileSink # type: ignore - from ._models import MediaGraphFrameRateFilterProcessor # type: ignore from ._models import MediaGraphGrpcExtension # type: ignore from ._models import MediaGraphGrpcExtensionDataTransfer # type: ignore from ._models import MediaGraphHttpExtension # type: ignore from ._models import MediaGraphHttpHeaderCredentials # type: ignore from ._models import MediaGraphImage # type: ignore from ._models import MediaGraphImageFormat # type: ignore - from ._models import MediaGraphImageFormatEncoded # type: ignore + from ._models import MediaGraphImageFormatBmp # type: ignore + from ._models import MediaGraphImageFormatJpeg # type: ignore + from ._models import MediaGraphImageFormatPng # type: ignore from ._models import MediaGraphImageFormatRaw # type: ignore from ._models import MediaGraphImageScale # 
type: ignore from ._models import MediaGraphInstance # type: ignore @@ -101,6 +104,7 @@ from ._models import MediaGraphPemCertificateList # type: ignore from ._models import MediaGraphProcessor # type: ignore from ._models import MediaGraphRtspSource # type: ignore + from ._models import MediaGraphSamplingOptions # type: ignore from ._models import MediaGraphSignalGateProcessor # type: ignore from ._models import MediaGraphSink # type: ignore from ._models import MediaGraphSource # type: ignore @@ -117,11 +121,10 @@ from ._models import MediaGraphTopologySetRequestBody # type: ignore from ._models import MediaGraphUnsecuredEndpoint # type: ignore from ._models import MediaGraphUsernamePasswordCredentials # type: ignore - from ._models import OperationBase # type: ignore + from ._models import MethodRequest # type: ignore -from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( +from ._direct_methodsfor_live_video_analyticson_io_tedge_enums import ( MediaGraphGrpcExtensionDataTransferMode, - MediaGraphImageEncodingFormat, MediaGraphImageFormatRawPixelFormat, MediaGraphImageScaleMode, MediaGraphInstanceState, @@ -140,14 +143,15 @@ 'MediaGraphEndpoint', 'MediaGraphExtensionProcessorBase', 'MediaGraphFileSink', - 'MediaGraphFrameRateFilterProcessor', 'MediaGraphGrpcExtension', 'MediaGraphGrpcExtensionDataTransfer', 'MediaGraphHttpExtension', 'MediaGraphHttpHeaderCredentials', 'MediaGraphImage', 'MediaGraphImageFormat', - 'MediaGraphImageFormatEncoded', + 'MediaGraphImageFormatBmp', + 'MediaGraphImageFormatJpeg', + 'MediaGraphImageFormatPng', 'MediaGraphImageFormatRaw', 'MediaGraphImageScale', 'MediaGraphInstance', @@ -170,6 +174,7 @@ 'MediaGraphPemCertificateList', 'MediaGraphProcessor', 'MediaGraphRtspSource', + 'MediaGraphSamplingOptions', 'MediaGraphSignalGateProcessor', 'MediaGraphSink', 'MediaGraphSource', @@ -186,9 +191,8 @@ 'MediaGraphTopologySetRequestBody', 'MediaGraphUnsecuredEndpoint', 'MediaGraphUsernamePasswordCredentials', - 'OperationBase', 
+ 'MethodRequest', 'MediaGraphGrpcExtensionDataTransferMode', - 'MediaGraphImageEncodingFormat', 'MediaGraphImageFormatRawPixelFormat', 'MediaGraphImageScaleMode', 'MediaGraphInstanceState', diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py similarity index 88% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py index 6e78e4728244..8223cb77e4a2 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py @@ -33,17 +33,7 @@ class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnu EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. -class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The different encoding formats that can be used for the image. - """ - - JPEG = "Jpeg" #: JPEG image format. - BMP = "Bmp" #: BMP image format. - PNG = "Png" #: PNG image format. - class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """pixel format - """ YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). 
RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. @@ -67,13 +57,13 @@ class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enu STRETCH = "Stretch" #: Stretch input frame to match given dimensions. class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Allowed states for a graph Instance. + """Allowed states for a graph instance. """ - INACTIVE = "Inactive" #: Inactive state. - ACTIVATING = "Activating" #: Activating state. - ACTIVE = "Active" #: Active state. - DEACTIVATING = "Deactivating" #: Deactivating state. + INACTIVE = "Inactive" #: The media graph instance is idle and not processing media. + ACTIVATING = "Activating" #: The media graph instance is transitioning into the active state. + ACTIVE = "Active" #: The media graph instance is active and processing media. + DEACTIVATING = "Deactivating" #: The media graph instance is transitioning into the inactive state. class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Enumeration that specifies the sensitivity of the motion detection processor. @@ -91,8 +81,6 @@ class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, IS_NOT = "isNot" #: A media type is not the same type or a subtype. class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """name - """ STRING = "String" #: A string parameter value. SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. 
diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index b0cb8248aec0..f49575de77b6 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -9,8 +9,8 @@ import msrest.serialization -class OperationBase(msrest.serialization.Model): - """OperationBase. +class MethodRequest(msrest.serialization.Model): + """MethodRequest. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. @@ -19,7 +19,7 @@ class OperationBase(msrest.serialization.Model): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -37,17 +37,17 @@ class OperationBase(msrest.serialization.Model): 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, **kwargs ): - super(OperationBase, self).__init__(**kwargs) + super(MethodRequest, self).__init__(**kwargs) self.method_name = None # type: Optional[str] -class ItemNonSetRequestBase(OperationBase): +class ItemNonSetRequestBase(MethodRequest): """ItemNonSetRequestBase. You probably want to use the sub-classes and not this class directly. Known @@ -59,7 +59,7 @@ class ItemNonSetRequestBase(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -81,7 +81,7 @@ class ItemNonSetRequestBase(OperationBase): 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -136,7 +136,7 @@ def __init__( class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. 
+ """Enables a media graph to record media to an Azure Media Services asset for subsequent playback. All required parameters must be populated in order to send to Azure. @@ -147,18 +147,18 @@ class MediaGraphAssetSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. Used when the Edge device is temporarily disconnected from - Azure. + :type segment_length: str + :param local_media_cache_path: Required. Path to a local file system directory for temporary + caching of media before writing to an Asset. Used when the Edge device is temporarily + disconnected from Azure. :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. + :param local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be + used for temporary caching of media. 
:type local_media_cache_maximum_size_mi_b: str """ @@ -166,6 +166,9 @@ class MediaGraphAssetSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'asset_name_pattern': {'required': True}, + 'local_media_cache_path': {'required': True}, + 'local_media_cache_maximum_size_mi_b': {'required': True}, } _attribute_map = { @@ -173,7 +176,7 @@ class MediaGraphAssetSink(MediaGraphSink): 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, } @@ -184,10 +187,10 @@ def __init__( ): super(MediaGraphAssetSink, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = kwargs.get('asset_name_pattern', None) + self.asset_name_pattern = kwargs['asset_name_pattern'] self.segment_length = kwargs.get('segment_length', None) - self.local_media_cache_path = kwargs.get('local_media_cache_path', None) - self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) + self.local_media_cache_path = kwargs['local_media_cache_path'] + self.local_media_cache_maximum_size_mi_b = kwargs['local_media_cache_maximum_size_mi_b'] class MediaGraphCertificateSource(msrest.serialization.Model): @@ -226,7 +229,7 @@ class MediaGraphProcessor(msrest.serialization.Model): """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. All required parameters must be populated in order to send to Azure. @@ -252,7 +255,7 @@ class MediaGraphProcessor(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} } def __init__( @@ -266,7 +269,7 @@ def __init__( class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + """Processor that allows for extensions outside of the Live Video Analytics Edge module to be integrated into the graph. It is the base class for various different kinds of extension processor types. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. @@ -280,16 +283,22 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -298,6 +307,7 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } _subtype_map = { @@ -310,8 +320,9 @@ def __init__( ): super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = kwargs.get('endpoint', None) - self.image = kwargs.get('image', None) + self.endpoint = kwargs['endpoint'] + self.image = kwargs['image'] + self.sampling_options = kwargs.get('sampling_options', None) class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): @@ -326,16 +337,22 @@ class 
MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -344,6 +361,7 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -439,23 +457,33 @@ class MediaGraphFileSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. 
- :type file_path_pattern: str + :param base_directory_path: Required. Absolute directory for all outputs to the Edge device + from this sink. + :type base_directory_path: str + :param file_name_pattern: Required. File name pattern for creating new files on the Edge + device. + :type file_name_pattern: str + :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + files from this sink. + :type maximum_size_mi_b: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, + 'base_directory_path': {'required': True}, + 'file_name_pattern': {'required': True}, + 'maximum_size_mi_b': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + 'base_directory_path': {'key': 'baseDirectoryPath', 'type': 'str'}, + 'file_name_pattern': {'key': 'fileNamePattern', 'type': 'str'}, + 'maximum_size_mi_b': {'key': 'maximumSizeMiB', 'type': 'str'}, } def __init__( @@ -464,46 +492,9 @@ def __init__( ): super(MediaGraphFileSink, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = kwargs['file_path_pattern'] - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. - :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = kwargs.get('maximum_fps', None) + self.base_directory_path = kwargs['base_directory_path'] + self.file_name_pattern = kwargs['file_name_pattern'] + self.maximum_size_mi_b = kwargs['maximum_size_mi_b'] class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): @@ -518,19 +509,27 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. 
+ :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inferencing engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :param extension_configuration: Optional configuration to pass to the gRPC extension. + :type extension_configuration: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, 'data_transfer': {'required': True}, } @@ -540,7 +539,9 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + 'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'}, } def __init__( @@ -550,6 +551,7 @@ def __init__( super(MediaGraphGrpcExtension, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str self.data_transfer = kwargs['data_transfer'] + self.extension_configuration = kwargs.get('extension_configuration', None) class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): @@ -558,7 +560,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. + mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str :param mode: Required. 
How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". @@ -596,16 +598,22 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -614,6 +622,7 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -633,7 +642,8 @@ class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): :type type: str :param header_name: Required. HTTP header name. :type header_name: str - :param header_value: Required. HTTP header value. + :param header_value: Required. HTTP header value. 
Please use a parameter so that the actual + value is not returned on PUT or GET requests. :type header_value: str """ @@ -686,7 +696,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): """Encoding settings for an image. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + sub-classes are: MediaGraphImageFormatBmp, MediaGraphImageFormatJpeg, MediaGraphImageFormatPng, MediaGraphImageFormatRaw. All required parameters must be populated in order to send to Azure. @@ -703,7 +713,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + 'type': {'#Microsoft.Media.MediaGraphImageFormatBmp': 'MediaGraphImageFormatBmp', '#Microsoft.Media.MediaGraphImageFormatJpeg': 'MediaGraphImageFormatJpeg', '#Microsoft.Media.MediaGraphImageFormatPng': 'MediaGraphImageFormatPng', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} } def __init__( @@ -714,19 +724,39 @@ def __init__( self.type = None # type: Optional[str] -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. +class MediaGraphImageFormatBmp(MediaGraphImageFormat): + """Encoding settings for Bmp images. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). 
+ """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatBmp, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatBmp' # type: str + + +class MediaGraphImageFormatJpeg(MediaGraphImageFormat): + """Encoding settings for Jpeg images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param quality: The image quality. Value must be between 0 to 100 (best quality). :type quality: str """ @@ -736,7 +766,6 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, 'quality': {'key': 'quality', 'type': 'str'}, } @@ -744,12 +773,36 @@ def __init__( self, **kwargs ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = kwargs.get('encoding', "Jpeg") + super(MediaGraphImageFormatJpeg, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatJpeg' # type: str self.quality = kwargs.get('quality', None) +class MediaGraphImageFormatPng(MediaGraphImageFormat): + """Encoding settings for Png images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatPng, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatPng' # type: str + + class MediaGraphImageFormatRaw(MediaGraphImageFormat): """Encoding settings for raw images. @@ -757,7 +810,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + :param pixel_format: Required. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat @@ -765,6 +818,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): _validation = { 'type': {'required': True}, + 'pixel_format': {'required': True}, } _attribute_map = { @@ -778,14 +832,17 @@ def __init__( ): super(MediaGraphImageFormatRaw, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = kwargs.get('pixel_format', None) + self.pixel_format = kwargs['pixel_format'] class MediaGraphImageScale(msrest.serialization.Model): """The scaling mode for the image. - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + All required parameters must be populated in order to send to Azure. + + :param mode: Required. Describes the modes for scaling an input video frame into an image, + before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", + "Pad", "Stretch". 
:type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str @@ -793,6 +850,10 @@ class MediaGraphImageScale(msrest.serialization.Model): :type height: str """ + _validation = { + 'mode': {'required': True}, + } + _attribute_map = { 'mode': {'key': 'mode', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, @@ -804,21 +865,22 @@ def __init__( **kwargs ): super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = kwargs.get('mode', None) + self.mode = kwargs['mode'] self.width = kwargs.get('width', None) self.height = kwargs.get('height', None) class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. + """Represents an instance of a media graph. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -851,7 +913,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. 
:type name: str @@ -869,7 +931,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -880,13 +942,13 @@ def __init__( class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. + """A collection of media graph instances. - :param value: Collection of graph instances. + :param value: A collection of media graph instances. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph instance collection. This is used when the collection contains too many results to + return in one response. :type continuation_token: str """ @@ -913,7 +975,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -931,7 +993,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -950,7 +1012,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. 
:type name: str @@ -968,7 +1030,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -987,7 +1049,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1005,7 +1067,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1015,14 +1077,14 @@ def __init__( self.method_name = 'GraphInstanceGet' # type: str -class MediaGraphInstanceListRequest(OperationBase): +class MediaGraphInstanceListRequest(MethodRequest): """MediaGraphInstanceListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ @@ -1036,7 +1098,7 @@ class MediaGraphInstanceListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1047,17 +1109,17 @@ def __init__( class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. + """Properties of a media graph instance. :param description: An optional description for the instance. :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. 
+ :param topology_name: The name of the media graph topology that this instance will run. A + topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. Possible values include: "Inactive", + :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ @@ -1080,7 +1142,7 @@ def __init__( self.state = kwargs.get('state', None) -class MediaGraphInstanceSetRequest(OperationBase): +class MediaGraphInstanceSetRequest(MethodRequest): """MediaGraphInstanceSetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1089,9 +1151,9 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. + :param instance: Required. Represents an instance of a media graph. :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ @@ -1107,7 +1169,7 @@ class MediaGraphInstanceSetRequest(OperationBase): 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1118,7 +1180,7 @@ def __init__( self.instance = kwargs['instance'] -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """MediaGraphInstanceSetRequestBody. 
Variables are only populated by the server, and will be ignored when sending a request. @@ -1127,13 +1189,14 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -1151,7 +1214,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1166,7 +1229,7 @@ def __init__( class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1177,9 +1240,9 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. 
These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. + :param hub_output_name: Required. Name of the output path to which the media graph will publish + message. These messages can then be delivered to desired destinations by declaring routes + referencing the output path in the IoT Edge deployment manifest. :type hub_output_name: str """ @@ -1187,6 +1250,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'hub_output_name': {'required': True}, } _attribute_map = { @@ -1202,11 +1266,11 @@ def __init__( ): super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = kwargs.get('hub_output_name', None) + self.hub_output_name = kwargs['hub_output_name'] class MediaGraphSource(msrest.serialization.Model): - """Media graph source. + """A source node in a media graph. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. @@ -1244,7 +1308,7 @@ def __init__( class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to receive messages via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1297,6 +1361,8 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool + :param event_aggregation_window: Event aggregation window duration, or 0 for no aggregation. 
+ :type event_aggregation_window: str """ _validation = { @@ -1311,6 +1377,7 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + 'event_aggregation_window': {'key': 'eventAggregationWindow', 'type': 'str'}, } def __init__( @@ -1321,6 +1388,7 @@ def __init__( self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str self.sensitivity = kwargs.get('sensitivity', None) self.output_motion_region = kwargs.get('output_motion_region', None) + self.event_aggregation_window = kwargs.get('event_aggregation_window', None) class MediaGraphNodeInput(msrest.serialization.Model): @@ -1384,19 +1452,19 @@ def __init__( class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """The declaration of a parameter in the media graph topology. A media graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the parameter. :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". + :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", + "Bool". 
:type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. + :param default: The default value for the parameter to be used if the media graph instance does + not specify a value. :type default: str """ @@ -1424,11 +1492,11 @@ def __init__( class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """A key-value pair. A media graph topology allows certain values to be parameterized. When an instance is created, the parameters are supplied with arguments specific to that instance. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the graph topology. + :param name: Required. Name of parameter as defined in the media graph topology. :type name: str :param value: Required. Value of parameter. :type value: str @@ -1484,7 +1552,7 @@ def __init__( class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. + """Enables a media graph to capture media from a RTSP server. All required parameters must be populated in order to send to Azure. @@ -1523,6 +1591,30 @@ def __init__( self.endpoint = kwargs['endpoint'] +class MediaGraphSamplingOptions(msrest.serialization.Model): + """Describes the properties of a sample. 
+ + :param skip_samples_without_annotation: If true, limits the samples submitted to the extension + to only samples which have associated inference(s). + :type skip_samples_without_annotation: str + :param maximum_samples_per_second: Maximum rate of samples submitted to the extension. + :type maximum_samples_per_second: str + """ + + _attribute_map = { + 'skip_samples_without_annotation': {'key': 'skipSamplesWithoutAnnotation', 'type': 'str'}, + 'maximum_samples_per_second': {'key': 'maximumSamplesPerSecond', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSamplingOptions, self).__init__(**kwargs) + self.skip_samples_without_annotation = kwargs.get('skip_samples_without_annotation', None) + self.maximum_samples_per_second = kwargs.get('maximum_samples_per_second', None) + + class MediaGraphSignalGateProcessor(MediaGraphProcessor): """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. @@ -1536,17 +1628,17 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. + events before evaluating them. :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. + :param activation_signal_offset: Required. Signal offset once the gate is activated (can be + negative). It is an offset between the time the event is received, and the timestamp of the + first media sample (eg. 
video frame) that is allowed through by the gate. :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). + :param minimum_activation_time: Required. The minimum period for which the gate remains open in + the absence of subsequent triggers (events). :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. + :param maximum_activation_time: Required. The maximum period for which the gate remains open in + the presence of subsequent events. :type maximum_activation_time: str """ @@ -1554,6 +1646,9 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'activation_signal_offset': {'required': True}, + 'minimum_activation_time': {'required': True}, + 'maximum_activation_time': {'required': True}, } _attribute_map = { @@ -1573,13 +1668,13 @@ def __init__( super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) - self.activation_signal_offset = kwargs.get('activation_signal_offset', None) - self.minimum_activation_time = kwargs.get('minimum_activation_time', None) - self.maximum_activation_time = kwargs.get('maximum_activation_time', None) + self.activation_signal_offset = kwargs['activation_signal_offset'] + self.minimum_activation_time = kwargs['minimum_activation_time'] + self.maximum_activation_time = kwargs['maximum_activation_time'] class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. + """The system data for a resource. This is used by both topologies and instances. :param created_at: The timestamp of resource creation (UTC). 
:type created_at: ~datetime.datetime @@ -1602,7 +1697,7 @@ def __init__( class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + """A TLS endpoint for media graph external connections. All required parameters must be populated in order to send to Azure. @@ -1670,15 +1765,16 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. + """A description of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -1703,13 +1799,13 @@ def __init__( class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. + """A collection of media graph topologies. - :param value: Collection of graph topologies. + :param value: A collection of media graph topologies. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph topologies collection. This is used when the collection contains too many results to + return in one response. 
:type continuation_token: str """ @@ -1736,7 +1832,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1754,7 +1850,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1773,7 +1869,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1791,7 +1887,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1801,14 +1897,14 @@ def __init__( self.method_name = 'GraphTopologyGet' # type: str -class MediaGraphTopologyListRequest(OperationBase): +class MediaGraphTopologyListRequest(MethodRequest): """MediaGraphTopologyListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -1822,7 +1918,7 @@ class MediaGraphTopologyListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1833,18 +1929,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. + """A description of the properties of a media graph topology. - :param description: An optional description for the instance. + :param description: :type description: str - :param parameters: An optional description for the instance. + :param parameters: :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. + :param sources: :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. + :param processors: :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: name. + :param sinks: :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -1868,7 +1964,7 @@ def __init__( self.sinks = kwargs.get('sinks', None) -class MediaGraphTopologySetRequest(OperationBase): +class MediaGraphTopologySetRequest(MethodRequest): """MediaGraphTopologySetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1877,9 +1973,9 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. Describes a graph topology. + :param graph: Required. A description of a media graph topology. 
:type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -1895,7 +1991,7 @@ class MediaGraphTopologySetRequest(OperationBase): 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1906,7 +2002,7 @@ def __init__( self.graph = kwargs['graph'] -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): +class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """MediaGraphTopologySetRequestBody. Variables are only populated by the server, and will be ignored when sending a request. @@ -1915,13 +2011,14 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -1939,7 +2036,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1994,7 +2091,8 @@ class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): :type type: str :param username: Required. Username for a username/password pair. :type username: str - :param password: Password for a username/password pair. 
+ :param password: Password for a username/password pair. Please use a parameter so that the + actual value is not returned on PUT or GET requests. :type password: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index a71214b4003f..d96578ee0a08 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -11,11 +11,11 @@ import msrest.serialization -from ._definitionsfor_live_video_analyticson_io_tedge_enums import * +from ._direct_methodsfor_live_video_analyticson_io_tedge_enums import * -class OperationBase(msrest.serialization.Model): - """OperationBase. +class MethodRequest(msrest.serialization.Model): + """MethodRequest. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. @@ -24,7 +24,7 @@ class OperationBase(msrest.serialization.Model): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -42,17 +42,17 @@ class OperationBase(msrest.serialization.Model): 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, **kwargs ): - super(OperationBase, self).__init__(**kwargs) + super(MethodRequest, self).__init__(**kwargs) self.method_name = None # type: Optional[str] -class ItemNonSetRequestBase(OperationBase): +class ItemNonSetRequestBase(MethodRequest): """ItemNonSetRequestBase. You probably want to use the sub-classes and not this class directly. Known @@ -64,7 +64,7 @@ class ItemNonSetRequestBase(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -86,7 +86,7 @@ class ItemNonSetRequestBase(OperationBase): 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -146,7 +146,7 @@ def __init__( class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. 
+ """Enables a media graph to record media to an Azure Media Services asset for subsequent playback. All required parameters must be populated in order to send to Azure. @@ -157,18 +157,18 @@ class MediaGraphAssetSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. Used when the Edge device is temporarily disconnected from - Azure. + :type segment_length: str + :param local_media_cache_path: Required. Path to a local file system directory for temporary + caching of media before writing to an Asset. Used when the Edge device is temporarily + disconnected from Azure. :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. + :param local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be + used for temporary caching of media. 
:type local_media_cache_maximum_size_mi_b: str """ @@ -176,6 +176,9 @@ class MediaGraphAssetSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'asset_name_pattern': {'required': True}, + 'local_media_cache_path': {'required': True}, + 'local_media_cache_maximum_size_mi_b': {'required': True}, } _attribute_map = { @@ -183,7 +186,7 @@ class MediaGraphAssetSink(MediaGraphSink): 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, } @@ -193,10 +196,10 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - asset_name_pattern: Optional[str] = None, - segment_length: Optional[datetime.timedelta] = None, - local_media_cache_path: Optional[str] = None, - local_media_cache_maximum_size_mi_b: Optional[str] = None, + asset_name_pattern: str, + local_media_cache_path: str, + local_media_cache_maximum_size_mi_b: str, + segment_length: Optional[str] = None, **kwargs ): super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) @@ -243,7 +246,7 @@ class MediaGraphProcessor(msrest.serialization.Model): """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. 
+ sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. All required parameters must be populated in order to send to Azure. @@ -269,7 +272,7 @@ class MediaGraphProcessor(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} } def __init__( @@ -286,7 +289,7 @@ def __init__( class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + """Processor that allows for extensions outside of the Live Video Analytics Edge module to be integrated into the graph. It is the base class for various different kinds of extension processor types. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. @@ -300,16 +303,22 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
:type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -318,6 +327,7 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } _subtype_map = { @@ -329,14 +339,16 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", + sampling_options: Optional["MediaGraphSamplingOptions"] = None, **kwargs ): super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str self.endpoint = endpoint self.image = image + self.sampling_options = sampling_options class 
MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): @@ -351,16 +363,22 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. 
+ :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -369,6 +387,7 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -376,11 +395,12 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", + sampling_options: Optional["MediaGraphSamplingOptions"] = None, **kwargs ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str @@ -472,23 +492,33 @@ class MediaGraphFileSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. - :type file_path_pattern: str + :param base_directory_path: Required. Absolute directory for all outputs to the Edge device + from this sink. 
+ :type base_directory_path: str + :param file_name_pattern: Required. File name pattern for creating new files on the Edge + device. + :type file_name_pattern: str + :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + files from this sink. + :type maximum_size_mi_b: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, + 'base_directory_path': {'required': True}, + 'file_name_pattern': {'required': True}, + 'maximum_size_mi_b': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + 'base_directory_path': {'key': 'baseDirectoryPath', 'type': 'str'}, + 'file_name_pattern': {'key': 'fileNamePattern', 'type': 'str'}, + 'maximum_size_mi_b': {'key': 'maximumSizeMiB', 'type': 'str'}, } def __init__( @@ -496,55 +526,16 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - file_path_pattern: str, + base_directory_path: str, + file_name_pattern: str, + maximum_size_mi_b: str, **kwargs ): super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = file_path_pattern - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. 
An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. - :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - maximum_fps: Optional[str] = None, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = maximum_fps + self.base_directory_path = base_directory_path + self.file_name_pattern = file_name_pattern + self.maximum_size_mi_b = maximum_size_mi_b class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): @@ -559,19 +550,27 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. 
:type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inferencing engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :param extension_configuration: Optional configuration to pass to the gRPC extension. + :type extension_configuration: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, 'data_transfer': {'required': True}, } @@ -581,7 +580,9 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + 'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'}, } def __init__( @@ -589,14 +590,17 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", data_transfer: "MediaGraphGrpcExtensionDataTransfer", - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + sampling_options: Optional["MediaGraphSamplingOptions"] = None, + extension_configuration: Optional[str] = None, **kwargs ): - super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, 
sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str self.data_transfer = data_transfer + self.extension_configuration = extension_configuration class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): @@ -605,7 +609,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. + mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". @@ -646,16 +650,22 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. 
+ :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -664,6 +674,7 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -671,11 +682,12 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", + sampling_options: Optional["MediaGraphSamplingOptions"] = None, **kwargs ): - super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str @@ -688,7 +700,8 @@ class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): :type type: str :param header_name: Required. HTTP header name. :type header_name: str - :param header_value: Required. HTTP header value. + :param header_value: Required. HTTP header value. Please use a parameter so that the actual + value is not returned on PUT or GET requests. :type header_value: str """ @@ -747,7 +760,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): """Encoding settings for an image. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. 
+ sub-classes are: MediaGraphImageFormatBmp, MediaGraphImageFormatJpeg, MediaGraphImageFormatPng, MediaGraphImageFormatRaw. All required parameters must be populated in order to send to Azure. @@ -764,7 +777,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + 'type': {'#Microsoft.Media.MediaGraphImageFormatBmp': 'MediaGraphImageFormatBmp', '#Microsoft.Media.MediaGraphImageFormatJpeg': 'MediaGraphImageFormatJpeg', '#Microsoft.Media.MediaGraphImageFormatPng': 'MediaGraphImageFormatPng', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} } def __init__( @@ -775,19 +788,39 @@ def __init__( self.type = None # type: Optional[str] -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. +class MediaGraphImageFormatBmp(MediaGraphImageFormat): + """Encoding settings for Bmp images. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatBmp, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatBmp' # type: str + + +class MediaGraphImageFormatJpeg(MediaGraphImageFormat): + """Encoding settings for Jpeg images. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param quality: The image quality. Value must be between 0 to 100 (best quality). :type quality: str """ @@ -797,23 +830,45 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, 'quality': {'key': 'quality', 'type': 'str'}, } def __init__( self, *, - encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", quality: Optional[str] = None, **kwargs ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = encoding + super(MediaGraphImageFormatJpeg, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatJpeg' # type: str self.quality = quality +class MediaGraphImageFormatPng(MediaGraphImageFormat): + """Encoding settings for Png images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatPng, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatPng' # type: str + + class MediaGraphImageFormatRaw(MediaGraphImageFormat): """Encoding settings for raw images. @@ -821,7 +876,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + :param pixel_format: Required. 
Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat @@ -829,6 +884,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): _validation = { 'type': {'required': True}, + 'pixel_format': {'required': True}, } _attribute_map = { @@ -839,7 +895,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): def __init__( self, *, - pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, + pixel_format: Union[str, "MediaGraphImageFormatRawPixelFormat"], **kwargs ): super(MediaGraphImageFormatRaw, self).__init__(**kwargs) @@ -850,8 +906,11 @@ def __init__( class MediaGraphImageScale(msrest.serialization.Model): """The scaling mode for the image. - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + All required parameters must be populated in order to send to Azure. + + :param mode: Required. Describes the modes for scaling an input video frame into an image, + before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", + "Pad", "Stretch". :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. 
:type width: str @@ -859,6 +918,10 @@ class MediaGraphImageScale(msrest.serialization.Model): :type height: str """ + _validation = { + 'mode': {'required': True}, + } + _attribute_map = { 'mode': {'key': 'mode', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, @@ -868,7 +931,7 @@ class MediaGraphImageScale(msrest.serialization.Model): def __init__( self, *, - mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, + mode: Union[str, "MediaGraphImageScaleMode"], width: Optional[str] = None, height: Optional[str] = None, **kwargs @@ -880,15 +943,16 @@ def __init__( class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. + """Represents an instance of a media graph. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -925,7 +989,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -943,7 +1007,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -956,13 +1020,13 @@ def __init__( class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. 
+ """A collection of media graph instances. - :param value: Collection of graph instances. + :param value: A collection of media graph instances. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph instance collection. This is used when the collection contains too many results to + return in one response. :type continuation_token: str """ @@ -992,7 +1056,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1010,7 +1074,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1031,7 +1095,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1049,7 +1113,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1070,7 +1134,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. 
Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1088,7 +1152,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1100,14 +1164,14 @@ def __init__( self.method_name = 'GraphInstanceGet' # type: str -class MediaGraphInstanceListRequest(OperationBase): +class MediaGraphInstanceListRequest(MethodRequest): """MediaGraphInstanceListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ @@ -1121,7 +1185,7 @@ class MediaGraphInstanceListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1132,17 +1196,17 @@ def __init__( class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. + """Properties of a media graph instance. :param description: An optional description for the instance. :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. + :param topology_name: The name of the media graph topology that this instance will run. A + topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. 
Possible values include: "Inactive", + :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ @@ -1170,7 +1234,7 @@ def __init__( self.state = state -class MediaGraphInstanceSetRequest(OperationBase): +class MediaGraphInstanceSetRequest(MethodRequest): """MediaGraphInstanceSetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1179,9 +1243,9 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. + :param instance: Required. Represents an instance of a media graph. :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ @@ -1197,7 +1261,7 @@ class MediaGraphInstanceSetRequest(OperationBase): 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1210,7 +1274,7 @@ def __init__( self.instance = instance -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """MediaGraphInstanceSetRequestBody. Variables are only populated by the server, and will be ignored when sending a request. @@ -1219,13 +1283,14 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. 
:type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -1243,7 +1308,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1262,7 +1327,7 @@ def __init__( class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1273,9 +1338,9 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. + :param hub_output_name: Required. Name of the output path to which the media graph will publish + message. These messages can then be delivered to desired destinations by declaring routes + referencing the output path in the IoT Edge deployment manifest. 
:type hub_output_name: str """ @@ -1283,6 +1348,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'hub_output_name': {'required': True}, } _attribute_map = { @@ -1297,7 +1363,7 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - hub_output_name: Optional[str] = None, + hub_output_name: str, **kwargs ): super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) @@ -1306,7 +1372,7 @@ def __init__( class MediaGraphSource(msrest.serialization.Model): - """Media graph source. + """A source node in a media graph. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. @@ -1346,7 +1412,7 @@ def __init__( class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to receive messages via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1402,6 +1468,8 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool + :param event_aggregation_window: Event aggregation window duration, or 0 for no aggregation. 
+ :type event_aggregation_window: str """ _validation = { @@ -1416,6 +1484,7 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + 'event_aggregation_window': {'key': 'eventAggregationWindow', 'type': 'str'}, } def __init__( @@ -1425,12 +1494,14 @@ def __init__( inputs: List["MediaGraphNodeInput"], sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, output_motion_region: Optional[bool] = None, + event_aggregation_window: Optional[str] = None, **kwargs ): super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str self.sensitivity = sensitivity self.output_motion_region = output_motion_region + self.event_aggregation_window = event_aggregation_window class MediaGraphNodeInput(msrest.serialization.Model): @@ -1500,19 +1571,19 @@ def __init__( class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """The declaration of a parameter in the media graph topology. A media graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the parameter. :type name: str - :param type: Required. name. 
Possible values include: "String", "SecretString", "Int", - "Double", "Bool". + :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", + "Bool". :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. + :param default: The default value for the parameter to be used if the media graph instance does + not specify a value. :type default: str """ @@ -1545,11 +1616,11 @@ def __init__( class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """A key-value pair. A media graph topology allows certain values to be parameterized. When an instance is created, the parameters are supplied with arguments specific to that instance. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the graph topology. + :param name: Required. Name of parameter as defined in the media graph topology. :type name: str :param value: Required. Value of parameter. :type value: str @@ -1610,7 +1681,7 @@ def __init__( class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. + """Enables a media graph to capture media from a RTSP server. All required parameters must be populated in order to send to Azure. 
@@ -1653,6 +1724,33 @@ def __init__( self.endpoint = endpoint +class MediaGraphSamplingOptions(msrest.serialization.Model): + """Describes the properties of a sample. + + :param skip_samples_without_annotation: If true, limits the samples submitted to the extension + to only samples which have associated inference(s). + :type skip_samples_without_annotation: str + :param maximum_samples_per_second: Maximum rate of samples submitted to the extension. + :type maximum_samples_per_second: str + """ + + _attribute_map = { + 'skip_samples_without_annotation': {'key': 'skipSamplesWithoutAnnotation', 'type': 'str'}, + 'maximum_samples_per_second': {'key': 'maximumSamplesPerSecond', 'type': 'str'}, + } + + def __init__( + self, + *, + skip_samples_without_annotation: Optional[str] = None, + maximum_samples_per_second: Optional[str] = None, + **kwargs + ): + super(MediaGraphSamplingOptions, self).__init__(**kwargs) + self.skip_samples_without_annotation = skip_samples_without_annotation + self.maximum_samples_per_second = maximum_samples_per_second + + class MediaGraphSignalGateProcessor(MediaGraphProcessor): """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. @@ -1666,17 +1764,17 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. + events before evaluating them. :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. 
+ :param activation_signal_offset: Required. Signal offset once the gate is activated (can be + negative). It is an offset between the time the event is received, and the timestamp of the + first media sample (eg. video frame) that is allowed through by the gate. :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). + :param minimum_activation_time: Required. The minimum period for which the gate remains open in + the absence of subsequent triggers (events). :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. + :param maximum_activation_time: Required. The maximum period for which the gate remains open in + the presence of subsequent events. :type maximum_activation_time: str """ @@ -1684,6 +1782,9 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'activation_signal_offset': {'required': True}, + 'minimum_activation_time': {'required': True}, + 'maximum_activation_time': {'required': True}, } _attribute_map = { @@ -1701,10 +1802,10 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], + activation_signal_offset: str, + minimum_activation_time: str, + maximum_activation_time: str, activation_evaluation_window: Optional[str] = None, - activation_signal_offset: Optional[str] = None, - minimum_activation_time: Optional[str] = None, - maximum_activation_time: Optional[str] = None, **kwargs ): super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) @@ -1716,7 +1817,7 @@ def __init__( class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. + """The system data for a resource. This is used by both topologies and instances. :param created_at: The timestamp of resource creation (UTC). 
:type created_at: ~datetime.datetime @@ -1742,7 +1843,7 @@ def __init__( class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + """A TLS endpoint for media graph external connections. All required parameters must be populated in order to send to Azure. @@ -1818,15 +1919,16 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. + """A description of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -1855,13 +1957,13 @@ def __init__( class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. + """A collection of media graph topologies. - :param value: Collection of graph topologies. + :param value: A collection of media graph topologies. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph topologies collection. This is used when the collection contains too many results to + return in one response. 
:type continuation_token: str """ @@ -1891,7 +1993,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1909,7 +2011,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1930,7 +2032,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1948,7 +2050,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1960,14 +2062,14 @@ def __init__( self.method_name = 'GraphTopologyGet' # type: str -class MediaGraphTopologyListRequest(OperationBase): +class MediaGraphTopologyListRequest(MethodRequest): """MediaGraphTopologyListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -1981,7 +2083,7 @@ class MediaGraphTopologyListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1992,18 +2094,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. + """A description of the properties of a media graph topology. - :param description: An optional description for the instance. + :param description: :type description: str - :param parameters: An optional description for the instance. + :param parameters: :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. + :param sources: :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. + :param processors: :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: name. + :param sinks: :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -2033,7 +2135,7 @@ def __init__( self.sinks = sinks -class MediaGraphTopologySetRequest(OperationBase): +class MediaGraphTopologySetRequest(MethodRequest): """MediaGraphTopologySetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -2042,9 +2144,9 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. Describes a graph topology. + :param graph: Required. A description of a media graph topology. 
:type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -2060,7 +2162,7 @@ class MediaGraphTopologySetRequest(OperationBase): 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -2073,7 +2175,7 @@ def __init__( self.graph = graph -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): +class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """MediaGraphTopologySetRequestBody. Variables are only populated by the server, and will be ignored when sending a request. @@ -2082,13 +2184,14 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -2106,7 +2209,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -2168,7 +2271,8 @@ class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): :type type: str :param username: Required. Username for a username/password pair. :type username: str - :param password: Password for a username/password pair. 
+ :param password: Password for a username/password pair. Please use a parameter so that the + actual value is not returned on PUT or GET requests. :type password: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py index c89397b9c30a..5d4949c64960 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py @@ -6,12 +6,12 @@ from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time -device_id = "lva-sample-device" -module_d = "lvaEdge" -connection_string = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING") +device_id = "enter-your-device-name" +module_d = "enter-your-module-name" +connection_string = "enter-your-connection-string" graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" -graph_url = '"rtsp://sample-url-from-camera"' +graph_url = "rtsp://sample-url-from-camera" def build_graph_topology(): graph_properties = MediaGraphTopologyProperties() @@ -38,43 +38,45 @@ def build_graph_instance(): return graph_instance -def invoke_method(method): +def invoke_method_helper(method): direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize()) registry_manager = IoTHubRegistryManager(connection_string) - return registry_manager.invoke_device_module_method(device_id, module_d, direct_method) + payload = registry_manager.invoke_device_module_method(device_id, module_d, direct_method).payload + if payload is not None and 'error' in payload: + print(payload['error']) + return None + + return payload def main(): graph_topology = build_graph_topology() graph_instance = build_graph_instance() try: - set_graph = invoke_method(MediaGraphTopologySetRequest(graph=graph_topology)) - set_graph_result = MediaGraphTopology.deserialize(set_graph) - - list_graph = 
invoke_method(MediaGraphTopologyListRequest()) - list_graph_result = MediaGraphTopology.deserialize(list_graph) + set_graph_response = invoke_method_helper(MediaGraphTopologySetRequest(graph=graph_topology)) + + list_graph_response = invoke_method_helper(MediaGraphTopologyListRequest()) + if list_graph_response: + list_graph_result = MediaGraphTopologyCollection.deserialize(list_graph_response) - get_graph = invoke_method(MediaGraphTopologyGetRequest(name=graph_topology_name)) - get_graph_result = MediaGraphTopology.deserialize(get_graph) + get_graph_response = invoke_method_helper(MediaGraphTopologyGetRequest(name=graph_topology_name)) + if get_graph_response: + get_graph_result = MediaGraphTopology.deserialize(get_graph_response) - set_graph_instance = invoke_method(MediaGraphInstanceSetRequest(instance=graph_instance)) - set_graph_instance_result = MediaGraphInstance.deserialize(set_graph_instance) + set_graph_instance_response = invoke_method_helper(MediaGraphInstanceSetRequest(instance=graph_instance)) - activate_graph_instance = invoke_method(MediaGraphInstanceActivateRequest(name=graph_instance_name)) - activate_graph_instance_result = MediaGraphInstance.deserialize(activate_graph_instance) + activate_graph_instance_response = invoke_method_helper(MediaGraphInstanceActivateRequest(name=graph_instance_name)) - get_graph_instance = invoke_method(MediaGraphInstanceGetRequest(name=graph_instance_name)) - get_graph_instance_result = MediaGraphInstance.deserialize(get_graph_instance) + get_graph_instance_response = invoke_method_helper(MediaGraphInstanceGetRequest(name=graph_instance_name)) + if get_graph_instance_response: + get_graph_instance_result = MediaGraphInstance.deserialize(get_graph_instance_response) - deactivate_graph_instance = invoke_method(MediaGraphInstanceDeActivateRequest(name=graph_instance_name)) - deactivate_graph_instance_result = MediaGraphInstance.deserialize(deactivate_graph_instance) + deactivate_graph_instance_response = 
invoke_method_helper(MediaGraphInstanceDeActivateRequest(name=graph_instance_name)) - delete_graph_instance = invoke_method(MediaGraphInstanceDeleteRequest(name=graph_instance_name)) - delete_graph_instance_result = MediaGraphInstance.deserialize(delete_graph_instance) + delete_graph_instance_response = invoke_method_helper(MediaGraphInstanceDeleteRequest(name=graph_instance_name)) - delete_graph = invoke_method(MediaGraphTopologyDeleteRequest(name=graph_topology_name)) - delete_graph_result = MediaGraphTopology.deserialize(delete_graph) + delete_graph_response = invoke_method_helper(MediaGraphTopologyDeleteRequest(name=graph_topology_name)) except Exception as ex: print(ex) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index 03aa0ca72f85..9d8808d4c738 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -10,7 +10,7 @@ autorest --v3 --python ## Settings ```yaml -require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/livevideoanalytics/edge/_generated namespace: azure.media.livevideoanalytics.edge no-namespace-folders: true From 2cd1cd7d41b4c02b9708fc6e216b3cc38bf6d9b5 Mon Sep 17 00:00:00 2001 From: hivyas Date: Mon, 7 Dec 2020 11:34:32 -0800 Subject: [PATCH 22/64] updated generated code --- ...r_live_video_analyticson_io_tedge_enums.py | 6 +- .../edge/_generated/models/_models.py | 59 +++++++++++-------- .../edge/_generated/models/_models_py3.py | 59 +++++++++++-------- 3 files changed, 71 insertions(+), 53 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py index 8223cb77e4a2..eb8b24817ee0 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py @@ -27,13 +27,15 @@ def __getattr__(cls, name): class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """How frame data should be transmitted to the inferencing engine. + """How frame data should be transmitted to the inference engine. """ EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The pixel format that will be used to encode images. + """ YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. @@ -81,6 +83,8 @@ class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, IS_NOT = "isNot" #: A media type is not the same type or a subtype. class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of the parameter. + """ STRING = "String" #: A string parameter value. SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. 
diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index f49575de77b6..4b7f9bc7fcf7 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -102,7 +102,7 @@ class MediaGraphSink(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -142,12 +142,14 @@ class MediaGraphAssetSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: Required. A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must + include at least one system variable. See the documentation for available variables and + additional examples. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. 
Expressed in increments of 30 seconds, with a minimum @@ -452,7 +454,7 @@ class MediaGraphFileSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -461,7 +463,8 @@ class MediaGraphFileSink(MediaGraphSink): from this sink. :type base_directory_path: str :param file_name_pattern: Required. File name pattern for creating new files on the Edge - device. + device. The pattern must include at least one system variable. See the documentation for + available variables and additional examples. :type file_name_pattern: str :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing files from this sink. @@ -517,7 +520,7 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions - :param data_transfer: Required. How media should be transferred to the inferencing engine. + :param data_transfer: Required. How media should be transferred to the inference engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. @@ -555,14 +558,14 @@ def __init__( class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. + """Describes how media should be transferred to the inference engine. All required parameters must be populated in order to send to Azure. 
:param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode @@ -810,8 +813,9 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: Required. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :param pixel_format: Required. The pixel format that will be used to encode images. Possible + values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", + "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ @@ -875,7 +879,7 @@ class MediaGraphInstance(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. @@ -1191,7 +1195,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
@@ -1235,7 +1239,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -1458,8 +1462,8 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :param name: Required. The name of the parameter. :type name: str - :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", - "Bool". + :param type: Required. The type of the parameter. Possible values include: "String", + "SecretString", "Int", "Double", "Bool". :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str @@ -1496,9 +1500,10 @@ class MediaGraphParameterDefinition(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the media graph topology. + :param name: Required. The name of the parameter defined in the media graph topology. :type name: str - :param value: Required. Value of parameter. + :param value: Required. The value to supply for the named parameter defined in the media graph + topology. :type value: str """ @@ -1765,11 +1770,11 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """A description of a media graph topology. + """The definition of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. 
This is used by both topologies and instances. @@ -1931,16 +1936,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): """A description of the properties of a media graph topology. - :param description: + :param description: A description of a media graph topology. It is recommended to use this to + describe the expected use of the topology. :type description: str - :param parameters: + :param parameters: The list of parameters defined in the topology. The value for these + parameters are supplied by instances of this topology. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: + :param sources: The list of source nodes in this topology. :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: + :param processors: The list of processor nodes in this topology. :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: + :param sinks: The list of sink nodes in this topology. :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -1975,7 +1982,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. A description of a media graph topology. + :param graph: Required. The definition of a media graph topology. :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -2013,7 +2020,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index d96578ee0a08..9dc0d776b487 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -109,7 +109,7 @@ class MediaGraphSink(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -152,12 +152,14 @@ class MediaGraphAssetSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: Required. A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must + include at least one system variable. See the documentation for available variables and + additional examples. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. 
Expressed in increments of 30 seconds, with a minimum @@ -487,7 +489,7 @@ class MediaGraphFileSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -496,7 +498,8 @@ class MediaGraphFileSink(MediaGraphSink): from this sink. :type base_directory_path: str :param file_name_pattern: Required. File name pattern for creating new files on the Edge - device. + device. The pattern must include at least one system variable. See the documentation for + available variables and additional examples. :type file_name_pattern: str :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing files from this sink. @@ -558,7 +561,7 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions - :param data_transfer: Required. How media should be transferred to the inferencing engine. + :param data_transfer: Required. How media should be transferred to the inference engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. @@ -604,14 +607,14 @@ def __init__( class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. + """Describes how media should be transferred to the inference engine. All required parameters must be populated in order to send to Azure. 
:param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode @@ -876,8 +879,9 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: Required. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :param pixel_format: Required. The pixel format that will be used to encode images. Possible + values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", + "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ @@ -947,7 +951,7 @@ class MediaGraphInstance(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. @@ -1285,7 +1289,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
@@ -1333,7 +1337,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -1577,8 +1581,8 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :param name: Required. The name of the parameter. :type name: str - :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", - "Bool". + :param type: Required. The type of the parameter. Possible values include: "String", + "SecretString", "Int", "Double", "Bool". :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str @@ -1620,9 +1624,10 @@ class MediaGraphParameterDefinition(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the media graph topology. + :param name: Required. The name of the parameter defined in the media graph topology. :type name: str - :param value: Required. Value of parameter. + :param value: Required. The value to supply for the named parameter defined in the media graph + topology. :type value: str """ @@ -1919,11 +1924,11 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """A description of a media graph topology. + """The definition of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. 
This is used by both topologies and instances. @@ -2096,16 +2101,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): """A description of the properties of a media graph topology. - :param description: + :param description: A description of a media graph topology. It is recommended to use this to + describe the expected use of the topology. :type description: str - :param parameters: + :param parameters: The list of parameters defined in the topology. The value for these + parameters are supplied by instances of this topology. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: + :param sources: The list of source nodes in this topology. :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: + :param processors: The list of processor nodes in this topology. :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: + :param sinks: The list of sink nodes in this topology. :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -2146,7 +2153,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. A description of a media graph topology. + :param graph: Required. The definition of a media graph topology. :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -2186,7 +2193,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
From 9d5573b4c4b94f643eefc38951a498a904eedf83 Mon Sep 17 00:00:00 2001 From: hivyas Date: Mon, 7 Dec 2020 18:59:48 -0800 Subject: [PATCH 23/64] updating swagger and import statement --- .../README.md | 3 + .../media/livevideoanalytics/edge/__init__.py | 65 ++++++++++++++++++- .../samples/sample_lva.py | 3 +- 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index 4ec628ab28e9..9e84ab1cd61e 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -23,6 +23,9 @@ pip install azure-media-livevideoanalytics-edge * Python 2.7, or 3.5 or later is required to use this package. * You need an active [Azure subscription][azure_sub], and a [IoT device connection string][iot_device_connection_string] to use this package. +| SDK | LVA Edge Module | +|---|---| +| 1.0.0b1 | 2.0 | ### Creating a graph topology and making requests Please visit the [Examples](#examples) for starter code ## Key concepts diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py index 17fe4565d648..94a917bccd2b 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py @@ -1,7 +1,68 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, -MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) +#from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, +#MediaGraphTopologySetRequest, 
MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) +from ._generated.models import * + +__all__ = [ + "MethodRequest", + "ItemNonSetRequestBase", + "MediaGraphSink" + "MediaGraphAssetSink", + "MediaGraphCertificateSource", + "MediaGraphProcessor", + "MediaGraphExtensionProcessorBase", + "MediaGraphCognitiveServicesVisionExtension", + "MediaGraphCredentials", + "MediaGraphEndpoint", + "MediaGraphFileSink", + "MediaGraphGrpcExtension", + "MediaGraphGrpcExtensionDataTransfer", + "MediaGraphHttpExtension", + "MediaGraphHttpHeaderCredentials", + "MediaGraphImage", + "MediaGraphImageFormat", + "MediaGraphImageFormatBmp", + "MediaGraphImageFormatJpeg", + "MediaGraphImageFormatPng", + "MediaGraphImageFormatRaw", + "MediaGraphImageScale", + "MediaGraphInstance", + "MediaGraphInstanceActivateRequest", + "MediaGraphInstanceCollection", + "MediaGraphInstanceDeActivateRequest", + "MediaGraphInstanceDeleteRequest", + "MediaGraphInstanceGetRequest", + "MediaGraphInstanceListRequest", + "MediaGraphInstanceProperties", + "MediaGraphInstanceSetRequest", + "MediaGraphInstanceSetRequestBody", + "MediaGraphIoTHubMessageSink", + "MediaGraphSource", + "MediaGraphIoTHubMessageSource", + "MediaGraphMotionDetectionProcessor", + "MediaGraphNodeInput", + "MediaGraphOutputSelector", + "MediaGraphParameterDeclaration", + "MediaGraphParameterDefinition", + "MediaGraphPemCertificateList", + "MediaGraphRtspSource", + "MediaGraphSamplingOptions", + "MediaGraphSignalGateProcessor", + "MediaGraphSystemData", + "MediaGraphTlsEndpoint", + "MediaGraphTlsValidationOptions", + "MediaGraphTopology", + "MediaGraphTopologyCollection", + "MediaGraphTopologyDeleteRequest", + "MediaGraphTopologyGetRequest", + "MediaGraphTopologyListRequest", + "MediaGraphTopologyProperties", + "MediaGraphTopologySetRequest", + "MediaGraphTopologySetRequestBody", + "MediaGraphUnsecuredEndpoint", + "MediaGraphUsernamePasswordCredentials" +] def _OverrideTopologySetRequestSerialize(self): graph_body = 
MediaGraphTopologySetRequestBody(name=self.graph.name) graph_body.system_data = self.graph.system_data diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py index 5d4949c64960..f5fa934fb6fe 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py @@ -1,7 +1,8 @@ import json import os -from azure.media.livevideoanalytics.edge._generated.models import * +#from azure.media.livevideoanalytics.edge._generated.models import * +from azure.media.livevideoanalytics.edge import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time From 5acee232eece1553fdbc0763141c4fdc8f138f33 Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Tue, 8 Dec 2020 09:25:47 -0800 Subject: [PATCH 24/64] Improve import system --- .../media/livevideoanalytics/edge/__init__.py | 66 +------------------ 1 file changed, 3 insertions(+), 63 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py index 94a917bccd2b..7f07d48526e2 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py @@ -1,68 +1,8 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -#from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, -#MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) - from ._generated.models import * +from ._generated import models + +__all__ = models.__all__ -__all__ = [ - "MethodRequest", - 
"ItemNonSetRequestBase", - "MediaGraphSink" - "MediaGraphAssetSink", - "MediaGraphCertificateSource", - "MediaGraphProcessor", - "MediaGraphExtensionProcessorBase", - "MediaGraphCognitiveServicesVisionExtension", - "MediaGraphCredentials", - "MediaGraphEndpoint", - "MediaGraphFileSink", - "MediaGraphGrpcExtension", - "MediaGraphGrpcExtensionDataTransfer", - "MediaGraphHttpExtension", - "MediaGraphHttpHeaderCredentials", - "MediaGraphImage", - "MediaGraphImageFormat", - "MediaGraphImageFormatBmp", - "MediaGraphImageFormatJpeg", - "MediaGraphImageFormatPng", - "MediaGraphImageFormatRaw", - "MediaGraphImageScale", - "MediaGraphInstance", - "MediaGraphInstanceActivateRequest", - "MediaGraphInstanceCollection", - "MediaGraphInstanceDeActivateRequest", - "MediaGraphInstanceDeleteRequest", - "MediaGraphInstanceGetRequest", - "MediaGraphInstanceListRequest", - "MediaGraphInstanceProperties", - "MediaGraphInstanceSetRequest", - "MediaGraphInstanceSetRequestBody", - "MediaGraphIoTHubMessageSink", - "MediaGraphSource", - "MediaGraphIoTHubMessageSource", - "MediaGraphMotionDetectionProcessor", - "MediaGraphNodeInput", - "MediaGraphOutputSelector", - "MediaGraphParameterDeclaration", - "MediaGraphParameterDefinition", - "MediaGraphPemCertificateList", - "MediaGraphRtspSource", - "MediaGraphSamplingOptions", - "MediaGraphSignalGateProcessor", - "MediaGraphSystemData", - "MediaGraphTlsEndpoint", - "MediaGraphTlsValidationOptions", - "MediaGraphTopology", - "MediaGraphTopologyCollection", - "MediaGraphTopologyDeleteRequest", - "MediaGraphTopologyGetRequest", - "MediaGraphTopologyListRequest", - "MediaGraphTopologyProperties", - "MediaGraphTopologySetRequest", - "MediaGraphTopologySetRequestBody", - "MediaGraphUnsecuredEndpoint", - "MediaGraphUsernamePasswordCredentials" -] def _OverrideTopologySetRequestSerialize(self): graph_body = MediaGraphTopologySetRequestBody(name=self.graph.name) graph_body.system_data = self.graph.system_data From 0909a8a436aa929d5c7d2d87e8add106d93a1901 
Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 10 Dec 2020 09:42:59 -0800 Subject: [PATCH 25/64] regenerated using public swagger --- .../README.md | 7 ++- .../edge/_generated/models/_models.py | 62 ++++++++++--------- .../edge/_generated/models/_models_py3.py | 62 ++++++++++--------- 3 files changed, 72 insertions(+), 59 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index 9e84ab1cd61e..ee06ce1f1c4f 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -22,10 +22,11 @@ pip install azure-media-livevideoanalytics-edge * Python 2.7, or 3.5 or later is required to use this package. * You need an active [Azure subscription][azure_sub], and a [IoT device connection string][iot_device_connection_string] to use this package. +* You will need to use the version of the SDK that corresponds to the version of the LVA Edge module you are using. -| SDK | LVA Edge Module | -|---|---| -| 1.0.0b1 | 2.0 | + | SDK | LVA Edge Module | + |---|---| + | 1.0.0b1 | 2.0 | ### Creating a graph topology and making requests Please visit the [Examples](#examples) for starter code ## Key concepts diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index 4b7f9bc7fcf7..e4139c77881c 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -17,14 +17,16 @@ class MethodRequest(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. 
+ All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -57,7 +59,7 @@ class ItemNonSetRequestBase(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -66,7 +68,7 @@ class ItemNonSetRequestBase(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -915,7 +917,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -924,7 +926,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -977,7 +979,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. 
Default value: "2.0". :vartype api_version: str @@ -986,7 +988,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1014,7 +1016,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1023,7 +1025,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1051,7 +1053,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1060,7 +1062,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1086,14 +1088,16 @@ class MediaGraphInstanceListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. 
:vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -1153,7 +1157,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1162,7 +1166,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'instance': {'required': True}, } @@ -1191,7 +1195,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1205,7 +1209,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1835,7 +1839,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -1844,7 +1848,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1872,7 +1876,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1881,7 +1885,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1907,14 +1911,16 @@ class MediaGraphTopologyListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -1978,7 +1984,7 @@ class MediaGraphTopologySetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -1987,7 +1993,7 @@ class MediaGraphTopologySetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'graph': {'required': True}, } @@ -2016,7 +2022,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -2030,7 +2036,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index 9dc0d776b487..054dda46d4a6 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -22,14 +22,16 @@ class MethodRequest(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -62,7 +64,7 @@ class ItemNonSetRequestBase(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -71,7 +73,7 @@ class ItemNonSetRequestBase(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -991,7 +993,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1000,7 +1002,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1058,7 +1060,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -1067,7 +1069,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1097,7 +1099,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1106,7 +1108,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1136,7 +1138,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1145,7 +1147,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1173,14 +1175,16 @@ class MediaGraphInstanceListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. 
:vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -1245,7 +1249,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1254,7 +1258,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'instance': {'required': True}, } @@ -1285,7 +1289,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1299,7 +1303,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1996,7 +2000,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -2005,7 +2009,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -2035,7 +2039,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -2044,7 +2048,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -2072,14 +2076,16 @@ class MediaGraphTopologyListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -2149,7 +2155,7 @@ class MediaGraphTopologySetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -2158,7 +2164,7 @@ class MediaGraphTopologySetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'graph': {'required': True}, } @@ -2189,7 +2195,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -2203,7 +2209,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } From b3d64451fbe8f00234779c05832d76a35ab7d8a5 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 10 Dec 2020 15:27:21 -0800 Subject: [PATCH 26/64] renaming package and folder --- .../CHANGELOG.md | 0 .../MANIFEST.in | 0 .../README.md | 0 .../azure/__init__.py | 0 .../azure/media/__init__.py | 0 .../azure/media/analytics}/__init__.py | 0 .../azure/media/analytics}/edge/__init__.py | 0 .../azure/media/analytics}/edge/_generated/__init__.py | 0 .../azure/media/analytics}/edge/_generated/_version.py | 0 .../azure/media/analytics}/edge/_generated/models/__init__.py | 0 .../_direct_methodsfor_live_video_analyticson_io_tedge_enums.py | 0 .../azure/media/analytics}/edge/_generated/models/_models.py | 0 .../media/analytics}/edge/_generated/models/_models_py3.py | 0 .../azure/media/analytics}/edge/_generated/py.typed | 0 .../azure/media/analytics}/edge/_version.py | 0 .../dev_requirements.txt | 0 .../docs/DevTips.md | 0 .../samples/sample_lva.py | 2 +- .../sdk_packaging.toml | 0 .../setup.cfg | 0 .../setup.py | 2 +- .../swagger/autorest.md | 
2 +- .../tests/conftest.py | 0 .../tests/test_app_config.py | 0 24 files changed, 3 insertions(+), 3 deletions(-) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/CHANGELOG.md (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/MANIFEST.in (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/README.md (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/azure/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/azure/media/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/_version.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/_models.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => 
azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/_models_py3.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/py.typed (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_version.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/dev_requirements.txt (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/docs/DevTips.md (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/samples/sample_lva.py (98%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/sdk_packaging.toml (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/setup.cfg (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/setup.py (98%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/swagger/autorest.md (76%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/tests/conftest.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/tests/test_app_config.py (100%) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md b/sdk/media/azure-media-analyticsedge/CHANGELOG.md similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md rename to sdk/media/azure-media-analyticsedge/CHANGELOG.md diff --git a/sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in b/sdk/media/azure-media-analyticsedge/MANIFEST.in similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in rename to sdk/media/azure-media-analyticsedge/MANIFEST.in diff --git 
a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-analyticsedge/README.md similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/README.md rename to sdk/media/azure-media-analyticsedge/README.md diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py b/sdk/media/azure-media-analyticsedge/azure/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py rename to 
sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py similarity index 100% rename from 
sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt b/sdk/media/azure-media-analyticsedge/dev_requirements.txt similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt rename to sdk/media/azure-media-analyticsedge/dev_requirements.txt diff --git a/sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md b/sdk/media/azure-media-analyticsedge/docs/DevTips.md similarity index 100% rename from 
sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md rename to sdk/media/azure-media-analyticsedge/docs/DevTips.md diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-analyticsedge/samples/sample_lva.py similarity index 98% rename from sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py rename to sdk/media/azure-media-analyticsedge/samples/sample_lva.py index f5fa934fb6fe..139a349bbcac 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-analyticsedge/samples/sample_lva.py @@ -2,7 +2,7 @@ import json import os #from azure.media.livevideoanalytics.edge._generated.models import * -from azure.media.livevideoanalytics.edge import * +from azure.media.analytics.edge import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time diff --git a/sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml b/sdk/media/azure-media-analyticsedge/sdk_packaging.toml similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml rename to sdk/media/azure-media-analyticsedge/sdk_packaging.toml diff --git a/sdk/media/azure-media-livevideoanalytics-edge/setup.cfg b/sdk/media/azure-media-analyticsedge/setup.cfg similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/setup.cfg rename to sdk/media/azure-media-analyticsedge/setup.cfg diff --git a/sdk/media/azure-media-livevideoanalytics-edge/setup.py b/sdk/media/azure-media-analyticsedge/setup.py similarity index 98% rename from sdk/media/azure-media-livevideoanalytics-edge/setup.py rename to sdk/media/azure-media-analyticsedge/setup.py index e1f1f3a85b11..a333424a5e35 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/setup.py +++ b/sdk/media/azure-media-analyticsedge/setup.py @@ -13,7 +13,7 @@ from setuptools import find_packages, setup 
# Change the PACKAGE_NAME only to change folder and different name -PACKAGE_NAME = "azure-media-livevideoanalytics-edge" +PACKAGE_NAME = "azure-media-analytics-edge" PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" # a-b-c => a/b/c diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-analyticsedge/swagger/autorest.md similarity index 76% rename from sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md rename to sdk/media/azure-media-analyticsedge/swagger/autorest.md index 9d8808d4c738..23c7e8518044 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-analyticsedge/swagger/autorest.md @@ -10,7 +10,7 @@ autorest --v3 --python ## Settings ```yaml -require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: https://github.com/Azure/azure-rest-api-specs/blob/7b34c62199a8d84f7252dcb8b08c1b593ae65124/specification/mediaservices/data-plane/readme.md output-folder: ../azure/media/livevideoanalytics/edge/_generated namespace: azure.media.livevideoanalytics.edge no-namespace-folders: true diff --git a/sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py b/sdk/media/azure-media-analyticsedge/tests/conftest.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py rename to sdk/media/azure-media-analyticsedge/tests/conftest.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py b/sdk/media/azure-media-analyticsedge/tests/test_app_config.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py rename to sdk/media/azure-media-analyticsedge/tests/test_app_config.py From 1f0a22cc337e4a9a2662f20696e9c470ac07c9bd Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 10 Dec 2020 15:45:28 -0800 Subject: [PATCH 27/64] updating ci file with new folder name --- sdk/media/ci.yml | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index a1e4e391ec07..54151e8c64ca 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,6 +30,6 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia - - name: azure_media_livevideoanalytics_edge - safeName: azuremedialivevideoanalyticsedge + - name: azure_media_analyticsedge + safeName: azuremediaanalyticsedge From 618a51effc2c27753bf52acbff5a7227549a5364 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:09:03 -0800 Subject: [PATCH 28/64] updated package name correctly and test --- .../CHANGELOG.md | 0 .../MANIFEST.in | 0 .../README.md | 2 +- .../azure/__init__.py | 0 .../azure/media/__init__.py | 0 .../azure/media/analyticsedge}/__init__.py | 0 .../analyticsedge}/_generated/__init__.py | 0 .../analyticsedge}/_generated/_version.py | 0 .../_generated/models/__init__.py | 0 ...r_live_video_analyticson_io_tedge_enums.py | 0 .../_generated/models/_models.py | 124 ++++++++---------- .../_generated/models/_models_py3.py | 124 ++++++++---------- .../media/analyticsedge}/_generated/py.typed | 0 .../azure/media/analyticsedge}/_version.py | 0 .../dev_requirements.txt | 0 .../docs/DevTips.md | 0 .../samples/sample_lva.py | 2 +- .../sdk_packaging.toml | 0 .../setup.cfg | 0 .../setup.py | 5 +- .../swagger/autorest.md | 4 +- .../tests/conftest.py | 0 .../tests/test_build_graph_serialize.py | 23 ++++ .../azure/media/analytics/__init__.py | 1 - .../tests/test_app_config.py | 5 - 25 files changed, 145 insertions(+), 145 deletions(-) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/CHANGELOG.md (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/MANIFEST.in (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/README.md (99%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/azure/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge 
=> azure-media-analytics-edge}/azure/media/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/_version.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/_models.py (93%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/_models_py3.py (93%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/py.typed (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_version.py (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/dev_requirements.txt (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/docs/DevTips.md (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/samples/sample_lva.py (99%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/sdk_packaging.toml (100%) rename sdk/media/{azure-media-analyticsedge => 
azure-media-analytics-edge}/setup.cfg (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/setup.py (96%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/swagger/autorest.md (82%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/tests/conftest.py (100%) create mode 100644 sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py delete mode 100644 sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py delete mode 100644 sdk/media/azure-media-analyticsedge/tests/test_app_config.py diff --git a/sdk/media/azure-media-analyticsedge/CHANGELOG.md b/sdk/media/azure-media-analytics-edge/CHANGELOG.md similarity index 100% rename from sdk/media/azure-media-analyticsedge/CHANGELOG.md rename to sdk/media/azure-media-analytics-edge/CHANGELOG.md diff --git a/sdk/media/azure-media-analyticsedge/MANIFEST.in b/sdk/media/azure-media-analytics-edge/MANIFEST.in similarity index 100% rename from sdk/media/azure-media-analyticsedge/MANIFEST.in rename to sdk/media/azure-media-analytics-edge/MANIFEST.in diff --git a/sdk/media/azure-media-analyticsedge/README.md b/sdk/media/azure-media-analytics-edge/README.md similarity index 99% rename from sdk/media/azure-media-analyticsedge/README.md rename to sdk/media/azure-media-analytics-edge/README.md index ee06ce1f1c4f..288e7704f76b 100644 --- a/sdk/media/azure-media-analyticsedge/README.md +++ b/sdk/media/azure-media-analytics-edge/README.md @@ -16,7 +16,7 @@ Use the client library for Live Video Analytics on IoT Edge to: Install the Live Video Analytics client library for Python with pip: ```bash -pip install azure-media-livevideoanalytics-edge +pip install azure-media-analytics-edge ``` ### Prerequisites diff --git a/sdk/media/azure-media-analyticsedge/azure/__init__.py b/sdk/media/azure-media-analytics-edge/azure/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/__init__.py rename to 
sdk/media/azure-media-analytics-edge/azure/__init__.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/__init__.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/__init__.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/_version.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/_version.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/__init__.py diff --git 
a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models.py similarity index 93% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models.py index e4139c77881c..d16abafbecc2 100644 --- a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models.py @@ -108,7 +108,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -148,7 +148,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must include at least one system variable. See the documentation for available variables and additional examples. @@ -243,7 +243,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -286,15 +286,15 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. 
- :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -340,15 +340,15 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -419,7 +419,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -460,7 +460,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param base_directory_path: Required. Absolute directory for all outputs to the Edge device from this sink. :type base_directory_path: str @@ -513,18 +513,17 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inference engine. - :type data_transfer: - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. 
:type extension_configuration: str """ @@ -569,8 +568,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -602,15 +600,15 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -678,9 +676,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. 
- :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale + :type scale: ~azure.media.analyticsedge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat + :type format: ~azure.media.analyticsedge.models.MediaGraphImageFormat """ _attribute_map = { @@ -819,7 +817,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat + ~azure.media.analyticsedge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -849,7 +847,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Required. Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -885,9 +883,9 @@ class MediaGraphInstance(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -951,7 +949,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """A collection of media graph instances. :param value: A collection of media graph instances. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] + :type value: list[~azure.media.analyticsedge.models.MediaGraphInstance] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph instance collection. This is used when the collection contains too many results to return in one response. @@ -1125,11 +1123,10 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.analyticsedge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1162,7 +1159,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param instance: Required. Represents an instance of a media graph. 
- :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance + :type instance: ~azure.media.analyticsedge.models.MediaGraphInstance """ _validation = { @@ -1203,9 +1200,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -1247,7 +1244,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param hub_output_name: Required. Name of the output path to which the media graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1361,11 +1358,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. 
Possible values include: "Low", "Medium", "High". :type sensitivity: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity + ~azure.media.analyticsedge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool @@ -1406,8 +1403,7 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] + :type output_selectors: list[~azure.media.analyticsedge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1432,8 +1428,7 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". :vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or ~azure.media.analyticsedge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1468,7 +1463,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. The type of the parameter. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.analyticsedge.models.MediaGraphParameterType :param description: Description of the parameter. 
:type description: str :param default: The default value for the parameter to be used if the media graph instance does @@ -1572,9 +1567,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.analyticsedge.models.MediaGraphRtspTransport :param endpoint: Required. RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint """ _validation = { @@ -1635,7 +1630,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events before evaluating them. :type activation_evaluation_window: str @@ -1713,17 +1708,15 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. 
- :type trusted_certificates: - ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource + :type trusted_certificates: ~azure.media.analyticsedge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. - :type validation_options: - ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions + :type validation_options: ~azure.media.analyticsedge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1782,9 +1775,9 @@ class MediaGraphTopology(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -1811,7 +1804,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """A collection of media graph topologies. :param value: A collection of media graph topologies. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] + :type value: list[~azure.media.analyticsedge.models.MediaGraphTopology] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph topologies collection. This is used when the collection contains too many results to return in one response. @@ -1947,14 +1940,13 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :type description: str :param parameters: The list of parameters defined in the topology. The value for these parameters are supplied by instances of this topology. 
- :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDeclaration] :param sources: The list of source nodes in this topology. - :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] + :type sources: list[~azure.media.analyticsedge.models.MediaGraphSource] :param processors: The list of processor nodes in this topology. - :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.analyticsedge.models.MediaGraphProcessor] :param sinks: The list of sink nodes in this topology. - :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.analyticsedge.models.MediaGraphSink] """ _attribute_map = { @@ -1989,7 +1981,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param graph: Required. The definition of a media graph topology. - :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology + :type graph: ~azure.media.analyticsedge.models.MediaGraphTopology """ _validation = { @@ -2030,9 +2022,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -2071,7 +2063,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models_py3.py similarity index 93% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models_py3.py index 054dda46d4a6..7542b26cb7dc 100644 --- a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models_py3.py @@ -115,7 +115,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -158,7 +158,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must include at least one system variable. See the documentation for available variables and additional examples. @@ -260,7 +260,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -306,15 +306,15 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. 
- :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -366,15 +366,15 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -451,7 +451,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -495,7 +495,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param base_directory_path: Required. Absolute directory for all outputs to the Edge device from this sink. :type base_directory_path: str @@ -554,18 +554,17 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inference engine. - :type data_transfer: - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. 
:type extension_configuration: str """ @@ -618,8 +617,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -654,15 +652,15 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -739,9 +737,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. 
- :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale + :type scale: ~azure.media.analyticsedge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat + :type format: ~azure.media.analyticsedge.models.MediaGraphImageFormat """ _attribute_map = { @@ -885,7 +883,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat + ~azure.media.analyticsedge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -917,7 +915,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Required. Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -957,9 +955,9 @@ class MediaGraphInstance(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -1029,7 +1027,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """A collection of media graph instances. :param value: A collection of media graph instances. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] + :type value: list[~azure.media.analyticsedge.models.MediaGraphInstance] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph instance collection. This is used when the collection contains too many results to return in one response. @@ -1212,11 +1210,10 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.analyticsedge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1254,7 +1251,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param instance: Required. Represents an instance of a media graph. 
- :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance + :type instance: ~azure.media.analyticsedge.models.MediaGraphInstance """ _validation = { @@ -1297,9 +1294,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -1345,7 +1342,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param hub_output_name: Required. Name of the output path to which the media graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1468,11 +1465,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. 
Possible values include: "Low", "Medium", "High". :type sensitivity: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity + ~azure.media.analyticsedge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool @@ -1519,8 +1516,7 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] + :type output_selectors: list[~azure.media.analyticsedge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1548,8 +1544,7 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". :vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or ~azure.media.analyticsedge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1587,7 +1582,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. The type of the parameter. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.analyticsedge.models.MediaGraphParameterType :param description: Description of the parameter. 
:type description: str :param default: The default value for the parameter to be used if the media graph instance does @@ -1701,9 +1696,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.analyticsedge.models.MediaGraphRtspTransport :param endpoint: Required. RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint """ _validation = { @@ -1771,7 +1766,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events before evaluating them. :type activation_evaluation_window: str @@ -1859,17 +1854,15 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. 
- :type trusted_certificates: - ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource + :type trusted_certificates: ~azure.media.analyticsedge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. - :type validation_options: - ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions + :type validation_options: ~azure.media.analyticsedge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1936,9 +1929,9 @@ class MediaGraphTopology(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -1969,7 +1962,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """A collection of media graph topologies. :param value: A collection of media graph topologies. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] + :type value: list[~azure.media.analyticsedge.models.MediaGraphTopology] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph topologies collection. This is used when the collection contains too many results to return in one response. @@ -2112,14 +2105,13 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :type description: str :param parameters: The list of parameters defined in the topology. The value for these parameters are supplied by instances of this topology. 
- :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDeclaration] :param sources: The list of source nodes in this topology. - :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] + :type sources: list[~azure.media.analyticsedge.models.MediaGraphSource] :param processors: The list of processor nodes in this topology. - :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.analyticsedge.models.MediaGraphProcessor] :param sinks: The list of sink nodes in this topology. - :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.analyticsedge.models.MediaGraphSink] """ _attribute_map = { @@ -2160,7 +2152,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param graph: Required. The definition of a media graph topology. - :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology + :type graph: ~azure.media.analyticsedge.models.MediaGraphTopology """ _validation = { @@ -2203,9 +2195,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -2248,7 +2240,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/py.typed similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/py.typed diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_version.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_version.py diff --git a/sdk/media/azure-media-analyticsedge/dev_requirements.txt b/sdk/media/azure-media-analytics-edge/dev_requirements.txt similarity index 100% rename from sdk/media/azure-media-analyticsedge/dev_requirements.txt rename to sdk/media/azure-media-analytics-edge/dev_requirements.txt diff --git a/sdk/media/azure-media-analyticsedge/docs/DevTips.md b/sdk/media/azure-media-analytics-edge/docs/DevTips.md similarity index 100% rename from sdk/media/azure-media-analyticsedge/docs/DevTips.md rename to sdk/media/azure-media-analytics-edge/docs/DevTips.md diff --git 
a/sdk/media/azure-media-analyticsedge/samples/sample_lva.py b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py similarity index 99% rename from sdk/media/azure-media-analyticsedge/samples/sample_lva.py rename to sdk/media/azure-media-analytics-edge/samples/sample_lva.py index 139a349bbcac..ea6058734888 100644 --- a/sdk/media/azure-media-analyticsedge/samples/sample_lva.py +++ b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py @@ -2,7 +2,7 @@ import json import os #from azure.media.livevideoanalytics.edge._generated.models import * -from azure.media.analytics.edge import * +from azure.media.analyticsedge import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time diff --git a/sdk/media/azure-media-analyticsedge/sdk_packaging.toml b/sdk/media/azure-media-analytics-edge/sdk_packaging.toml similarity index 100% rename from sdk/media/azure-media-analyticsedge/sdk_packaging.toml rename to sdk/media/azure-media-analytics-edge/sdk_packaging.toml diff --git a/sdk/media/azure-media-analyticsedge/setup.cfg b/sdk/media/azure-media-analytics-edge/setup.cfg similarity index 100% rename from sdk/media/azure-media-analyticsedge/setup.cfg rename to sdk/media/azure-media-analytics-edge/setup.cfg diff --git a/sdk/media/azure-media-analyticsedge/setup.py b/sdk/media/azure-media-analytics-edge/setup.py similarity index 96% rename from sdk/media/azure-media-analyticsedge/setup.py rename to sdk/media/azure-media-analytics-edge/setup.py index a333424a5e35..ac63e10dfe9e 100644 --- a/sdk/media/azure-media-analyticsedge/setup.py +++ b/sdk/media/azure-media-analytics-edge/setup.py @@ -14,12 +14,11 @@ # Change the PACKAGE_NAME only to change folder and different name PACKAGE_NAME = "azure-media-analytics-edge" +NAMESPACE_NAME = "azure.media.analyticsedge" PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" # a-b-c => a/b/c -package_folder_path = 
PACKAGE_NAME.replace('-', '/') -# a-b-c => a.b.c -namespace_name = PACKAGE_NAME.replace('-', '.') +package_folder_path = NAMESPACE_NAME.replace('.', '/') # azure v0.x is not compatible with this package # azure v0.x used to have a __version__ attribute (newer versions don't) diff --git a/sdk/media/azure-media-analyticsedge/swagger/autorest.md b/sdk/media/azure-media-analytics-edge/swagger/autorest.md similarity index 82% rename from sdk/media/azure-media-analyticsedge/swagger/autorest.md rename to sdk/media/azure-media-analytics-edge/swagger/autorest.md index 23c7e8518044..919859203e35 100644 --- a/sdk/media/azure-media-analyticsedge/swagger/autorest.md +++ b/sdk/media/azure-media-analytics-edge/swagger/autorest.md @@ -11,8 +11,8 @@ autorest --v3 --python ```yaml require: https://github.com/Azure/azure-rest-api-specs/blob/7b34c62199a8d84f7252dcb8b08c1b593ae65124/specification/mediaservices/data-plane/readme.md -output-folder: ../azure/media/livevideoanalytics/edge/_generated -namespace: azure.media.livevideoanalytics.edge +output-folder: ../azure/media/analyticsedge/_generated +namespace: azure.media.analyticsedge no-namespace-folders: true license-header: MICROSOFT_MIT_NO_VERSION enable-xml: false diff --git a/sdk/media/azure-media-analyticsedge/tests/conftest.py b/sdk/media/azure-media-analytics-edge/tests/conftest.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/tests/conftest.py rename to sdk/media/azure-media-analytics-edge/tests/conftest.py diff --git a/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py new file mode 100644 index 000000000000..d46839833404 --- /dev/null +++ b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py @@ -0,0 +1,23 @@ +import pytest +from azure.media.analyticsedge import * + +class TestGraphBuildSerialize(): + def test_build_graph_serialize(): + graph_topology_name = "graphTopology1" + 
graph_properties = MediaGraphTopologyProperties() + graph_properties.description = "Continuous video recording to an Azure Media Services Asset" + user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") + password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") + url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") + + source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) + node = MediaGraphNodeInput(node_name="rtspSource") + sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/") + graph_properties.parameters = [user_name_param, password_param, url_param] + graph_properties.sources = [source] + graph_properties.sinks = [sink] + graph = MediaGraphTopology(name=graph_topology_name,properties=graph_properties) + + set_graph_method = MediaGraphTopologySetRequest(graph=graph) + set_graph_method_serialize = set_graph_method.serialize() + assert set_graph_method_serialize['name'] == graph_topology_name \ No newline at end of file diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py deleted file mode 100644 index 69e3be50dac4..000000000000 --- a/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-analyticsedge/tests/test_app_config.py b/sdk/media/azure-media-analyticsedge/tests/test_app_config.py deleted 
file mode 100644 index 57f0ccfa146f..000000000000 --- a/sdk/media/azure-media-analyticsedge/tests/test_app_config.py +++ /dev/null @@ -1,5 +0,0 @@ -import pytest - -class TestAppConfig(): - def test_something(self): - assert 1 \ No newline at end of file From 96406a1a84ff783817f367730601bacaa3da66ff Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:32:24 -0800 Subject: [PATCH 29/64] updating test with missing parameter and sampels with better placeholder strings --- sdk/media/azure-media-analytics-edge/samples/sample_lva.py | 6 +++--- .../tests/test_build_graph_serialize.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/media/azure-media-analytics-edge/samples/sample_lva.py b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py index ea6058734888..634266833dbf 100644 --- a/sdk/media/azure-media-analytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py @@ -7,9 +7,9 @@ from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time -device_id = "enter-your-device-name" -module_d = "enter-your-module-name" -connection_string = "enter-your-connection-string" +device_id = "device-name" +module_d = "module-name" +connection_string = "connection-string" graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" graph_url = "rtsp://sample-url-from-camera" diff --git a/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py index d46839833404..fc8fe7185dc8 100644 --- a/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py +++ b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py @@ -2,7 +2,7 @@ from azure.media.analyticsedge import * class TestGraphBuildSerialize(): - def test_build_graph_serialize(): + def test_build_graph_serialize(self): graph_topology_name = "graphTopology1" graph_properties 
= MediaGraphTopologyProperties() graph_properties.description = "Continuous video recording to an Azure Media Services Asset" From 379967ce62d3ed7659ef9a29ceb4959fded18aff Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:54:54 -0800 Subject: [PATCH 30/64] updating ci file --- sdk/media/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 54151e8c64ca..d1217dc200f9 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,6 +30,6 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia - - name: azure_media_analyticsedge + - name: azure_media_analytics_edge safeName: azuremediaanalyticsedge From 2d511428a10ee314f20d44964ceb6cd1463d0e36 Mon Sep 17 00:00:00 2001 From: hivyas Date: Tue, 17 Nov 2020 15:29:53 -0800 Subject: [PATCH 31/64] removing tests and hardcoded strings --- sdk/media/azure-media-lva-edge/MANIFEST.in | 1 - .../samples/sample_lva.py | 2 +- .../azure-media-lva-edge/swagger/README.md | 2 +- .../swagger/appconfiguration.json | 1239 ----------------- .../tests/_shared/asynctestcase.py | 79 -- .../tests/_shared/testcase.py | 0 .../azure-media-lva-edge/tests/conftest.py | 25 - .../tests/test_app_config.py | 1 - 8 files changed, 2 insertions(+), 1347 deletions(-) delete mode 100644 sdk/media/azure-media-lva-edge/swagger/appconfiguration.json delete mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/testcase.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/conftest.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/test_app_config.py diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-lva-edge/MANIFEST.in index 7ebdd947f8ff..4a340e3b7f85 100644 --- a/sdk/media/azure-media-lva-edge/MANIFEST.in +++ b/sdk/media/azure-media-lva-edge/MANIFEST.in @@ -1,4 +1,3 @@ -recursive-include tests *.py include *.md include azure/__init__.py 
recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-lva-edge/samples/sample_lva.py index 9ac9ca9a817a..9b5e91818af6 100644 --- a/sdk/media/azure-media-lva-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-lva-edge/samples/sample_lva.py @@ -8,7 +8,7 @@ device_id = "lva-sample-device" module_d = "lvaEdge" -connection_string = "HostName=lvasamplehub77xvrvtar2bpw.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=o77hgzsswnBZsaGKVSDjSmm53m4ViJb/s1xv9zfDCi0=" +connection_string = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING") graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index 7880fc364c91..e80c97ff0f3c 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -11,7 +11,7 @@ autorest --v3 --python README.md ### Settings ```yaml -require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated namespace: azure.media.lva.edge no-namespace-folders: true diff --git a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json b/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json deleted file mode 100644 index 36b206ca6142..000000000000 --- a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json +++ /dev/null @@ -1,1239 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "description": "Direct Methods for Live Video Analytics on IoT Edge.", - "version": "1.0.4", - "title": "Direct Methods for Live Video Analytics on IoT Edge", - "contact": { - "email": "amshelp@microsoft.com" - } - }, - "security": [ - { - "sharedAccessSignature": [] - } - ], - "paths": {}, - "securityDefinitions": { - "sharedAccessSignature": { - 
"type": "apiKey", - "name": "Authorization", - "in": "header" - } - }, - "definitions": { - "OperationBase": { - "type": "object", - "properties": { - "methodName": { - "type": "string", - "description": "method name", - "readOnly": true - }, - "@apiVersion": { - "type": "string", - "description": "api version", - "enum": [ - "1.0" - ], - "x-ms-enum": { - "name": "ApiVersionEnum", - "modelAsString": false - } - } - }, - "discriminator": "methodName" - }, - "MediaGraphTopologySetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologySet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "graph" - ], - "properties": { - "graph": { - "$ref": "#/definitions/MediaGraphTopology" - } - } - }, - "MediaGraphTopologySetRequestBody": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologySet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - }, - { - "$ref": "#/definitions/MediaGraphTopology" - } - ] - }, - "MediaGraphInstanceSetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceSet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "instance" - ], - "properties": { - "instance": { - "$ref": "#/definitions/MediaGraphInstance" - } - } - }, - "ItemNonSetRequestBase": { - "type": "object", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "method name" - } - } - }, - "MediaGraphTopologyListRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyList", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ] - }, - "MediaGraphTopologyGetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyGet", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphTopologyDeleteRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyDelete", - 
"allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceListRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceList", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ] - }, - "MediaGraphInstanceGetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceGet", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceActivateRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceActivate", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceDeActivateRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceDeactivate", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceDeleteRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceDelete", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstance": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "name" - }, - "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" - }, - "properties": { - "$ref": "#/definitions/MediaGraphInstanceProperties" - } - }, - "description": "Represents a Media Graph instance." - }, - "MediaGraphInstanceProperties": { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "An optional description for the instance." - }, - "topologyName": { - "type": "string", - "description": "The name of the graph topology that this instance will run. A topology with this name should already have been set in the Edge module." 
- }, - "parameters": { - "type": "array", - "description": "List of one or more graph instance parameters.", - "items": { - "$ref": "#/definitions/MediaGraphParameterDefinition" - } - }, - "state": { - "type": "string", - "description": "Allowed states for a graph Instance.", - "enum": [ - "Inactive", - "Activating", - "Active", - "Deactivating" - ], - "x-ms-enum": { - "name": "MediaGraphInstanceState", - "values": [ - { - "value": "Inactive", - "description": "Inactive state." - }, - { - "value": "Activating", - "description": "Activating state." - }, - { - "value": "Active", - "description": "Active state." - }, - { - "value": "Deactivating", - "description": "Deactivating state." - } - ], - "modelAsString": false - } - } - }, - "description": "Properties of a Media Graph instance." - }, - "MediaGraphParameterDefinition": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string", - "description": "Name of parameter as defined in the graph topology." - }, - "value": { - "type": "string", - "description": "Value of parameter." - } - }, - "description": "A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." - }, - "MediaGraphInstanceCollection": { - "properties": { - "value": { - "type": "array", - "description": "Collection of graph instances.", - "items": { - "$ref": "#/definitions/MediaGraphInstance" - } - }, - "@continuationToken": { - "type": "string", - "description": "Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response)." - } - }, - "description": "Collection of graph instances." 
- }, - "MediaGraphTopologyCollection": { - "properties": { - "value": { - "type": "array", - "description": "Collection of graph topologies.", - "items": { - "$ref": "#/definitions/MediaGraphTopology" - } - }, - "@continuationToken": { - "type": "string", - "description": "Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response)." - } - }, - "description": "Collection of graph topologies." - }, - "MediaGraphTopology": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "name" - }, - "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" - }, - "properties": { - "$ref": "#/definitions/MediaGraphTopologyProperties" - } - }, - "description": "Describes a graph topology." - }, - "MediaGraphTopologyProperties": { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "An optional description for the instance." - }, - "parameters": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphParameterDeclaration" - } - }, - "sources": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphSource" - } - }, - "processors": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphProcessor" - } - }, - "sinks": { - "description": "name", - "type": "array", - "items": { - "$ref": "#/definitions/MediaGraphSink" - } - } - }, - "description": "Describes the properties of a graph topology." - }, - "MediaGraphSystemData": { - "type": "object", - "properties": { - "createdAt": { - "type": "string", - "format": "date-time", - "description": "The timestamp of resource creation (UTC)." 
- }, - "lastModifiedAt": { - "type": "string", - "format": "date-time", - "description": "The timestamp of resource last modification (UTC)." - } - }, - "description": "Graph system data." - }, - "MediaGraphParameterDeclaration": { - "type": "object", - "required": [ - "name", - "type" - ], - "properties": { - "name": { - "type": "string", - "description": "The name of the parameter.", - "maxLength": 64 - }, - "type": { - "type": "string", - "description": "name", - "enum": [ - "String", - "SecretString", - "Int", - "Double", - "Bool" - ], - "x-ms-enum": { - "name": "MediaGraphParameterType", - "values": [ - { - "value": "String", - "description": "A string parameter value." - }, - { - "value": "SecretString", - "description": "A string to hold sensitive information as parameter value." - }, - { - "value": "Int", - "description": "A 32-bit signed integer as parameter value." - }, - { - "value": "Double", - "description": "A 64-bit double-precision floating point type as parameter value." - }, - { - "value": "Bool", - "description": "A boolean value that is either true or false." - } - ], - "modelAsString": false - } - }, - "description": { - "type": "string", - "description": "Description of the parameter." - }, - "default": { - "type": "string", - "description": "The default value for the parameter, to be used if the graph instance does not specify a value." - } - }, - "description": "The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." - }, - "MediaGraphSource": { - "type": "object", - "required": [ - "@type", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The type of the source node. The discriminator for derived types." 
- }, - "name": { - "type": "string", - "description": "The name to be used for this source node." - } - }, - "description": "Media graph source." - }, - "MediaGraphRtspSource": { - "properties": { - "transport": { - "type": "string", - "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", - "enum": [ - "Http", - "Tcp" - ], - "x-ms-enum": { - "name": "MediaGraphRtspTransport", - "values": [ - { - "value": "Http", - "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." - }, - { - "value": "Tcp", - "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." - } - ], - "modelAsString": true - } - }, - "endpoint": { - "description": "RTSP endpoint of the stream that is being connected to.", - "$ref": "#/definitions/MediaGraphEndpoint" - } - }, - "required": [ - "endpoint" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSource" - }, - {} - ], - "description": "Enables a graph to capture media from a RTSP server.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphRtspSource" - }, - "MediaGraphIoTHubMessageSource": { - "properties": { - "hubInputName": { - "type": "string", - "description": "Name of the input path where messages can be routed to (via routes declared in the IoT Edge deployment manifest)." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSource" - }, - {} - ], - "description": "Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSource" - }, - "MediaGraphIoTHubMessageSink": { - "properties": { - "hubOutputName": { - "type": "string", - "description": "Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." 
- } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSink" - }, - "MediaGraphEndpoint": { - "type": "object", - "required": [ - "@type", - "url" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "credentials": { - "description": "Polymorphic credentials to be presented to the endpoint.", - "$ref": "#/definitions/MediaGraphCredentials" - }, - "url": { - "type": "string", - "description": "Url for the endpoint." - } - }, - "description": "Base class for endpoints." - }, - "MediaGraphCredentials": { - "type": "object", - "required": [ - "@type" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Credentials to present during authentication." - }, - "MediaGraphUsernamePasswordCredentials": { - "properties": { - "username": { - "type": "string", - "description": "Username for a username/password pair." - }, - "password": { - "type": "string", - "description": "Password for a username/password pair." - } - }, - "required": [ - "username" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCredentials" - }, - {} - ], - "description": "Username/password credential pair.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials" - }, - "MediaGraphHttpHeaderCredentials": { - "properties": { - "headerName": { - "type": "string", - "description": "HTTP header name." - }, - "headerValue": { - "type": "string", - "description": "HTTP header value." 
- } - }, - "required": [ - "headerName", - "headerValue" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCredentials" - }, - {} - ], - "description": "Http header service credentials.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpHeaderCredentials" - }, - "MediaGraphUnsecuredEndpoint": { - "allOf": [ - { - "$ref": "#/definitions/MediaGraphEndpoint" - }, - {} - ], - "description": "An endpoint that the media graph can connect to, with no encryption in transit.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUnsecuredEndpoint" - }, - "MediaGraphTlsEndpoint": { - "properties": { - "trustedCertificates": { - "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", - "$ref": "#/definitions/MediaGraphCertificateSource" - }, - "validationOptions": { - "description": "Validation options to use when authenticating a TLS connection. By default, strict validation is used.", - "$ref": "#/definitions/MediaGraphTlsValidationOptions" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphEndpoint" - }, - {} - ], - "description": "An endpoint that the graph can connect to, which must be connected over TLS/SSL.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphTlsEndpoint" - }, - "MediaGraphCertificateSource": { - "type": "object", - "required": [ - "@type" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Base class for certificate sources." - }, - "MediaGraphTlsValidationOptions": { - "type": "object", - "properties": { - "ignoreHostname": { - "type": "string", - "description": "Boolean value ignoring the host name (common name) during validation." - }, - "ignoreSignature": { - "type": "string", - "description": "Boolean value ignoring the integrity of the certificate chain at the current time." 
- } - }, - "description": "Options for controlling the authentication of TLS endpoints." - }, - "MediaGraphPemCertificateList": { - "properties": { - "certificates": { - "type": "array", - "description": "PEM formatted public certificates one per entry.", - "items": { - "type": "string" - } - } - }, - "required": [ - "certificates" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCertificateSource" - }, - {} - ], - "description": "A list of PEM formatted certificates.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphPemCertificateList" - }, - "MediaGraphSink": { - "type": "object", - "required": [ - "@type", - "inputs", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "Name to be used for the media graph sink." - }, - "inputs": { - "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node.", - "items": { - "$ref": "#/definitions/MediaGraphNodeInput" - } - } - }, - "description": "Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module." - }, - "MediaGraphNodeInput": { - "type": "object", - "properties": { - "nodeName": { - "type": "string", - "description": "The name of another node in the media graph, the output of which is used as input to this node." - }, - "outputSelectors": { - "type": "array", - "description": "Allows for the selection of particular streams from another node.", - "items": { - "$ref": "#/definitions/MediaGraphOutputSelector" - } - } - }, - "description": "Represents the input to any node in a media graph." 
- }, - "MediaGraphOutputSelector": { - "properties": { - "property": { - "type": "string", - "description": "The stream property to compare with.", - "enum": [ - "mediaType" - ], - "x-ms-enum": { - "name": "MediaGraphOutputSelectorProperty", - "values": [ - { - "value": "mediaType", - "description": "The stream's MIME type or subtype." - } - ], - "modelAsString": false - } - }, - "operator": { - "type": "string", - "description": "The operator to compare streams by.", - "enum": [ - "is", - "isNot" - ], - "x-ms-enum": { - "name": "MediaGraphOutputSelectorOperator", - "values": [ - { - "value": "is", - "description": "A media type is the same type or a subtype." - }, - { - "value": "isNot", - "description": "A media type is not the same type or a subtype." - } - ], - "modelAsString": false - } - }, - "value": { - "type": "string", - "description": "Value to compare against." - } - }, - "description": "Allows for the selection of particular streams from another node." - }, - "MediaGraphFileSink": { - "properties": { - "filePathPattern": { - "type": "string", - "description": "Absolute file path pattern for creating new files on the Edge device.", - "minLength": 1 - } - }, - "required": [ - "filePathPattern" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a media graph to write/store media (video and audio) to a file on the Edge device.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFileSink" - }, - "MediaGraphAssetSink": { - "properties": { - "assetNamePattern": { - "type": "string", - "description": "A name pattern when creating new assets." - }, - "segmentLength": { - "type": "string", - "format": "duration", - "example": "PT30S", - "description": "When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." 
- }, - "localMediaCachePath": { - "type": "string", - "description": "Path to a local file system directory for temporary caching of media, before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure." - }, - "localMediaCacheMaximumSizeMiB": { - "type": "string", - "description": "Maximum amount of disk space that can be used for temporary caching of media." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a graph to record media to an Azure Media Services asset, for subsequent playback.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphAssetSink" - }, - "MediaGraphProcessor": { - "type": "object", - "required": [ - "@type", - "inputs", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "The name for this processor node." - }, - "inputs": { - "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node.", - "items": { - "$ref": "#/definitions/MediaGraphNodeInput" - } - } - }, - "description": "A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output." - }, - "MediaGraphMotionDetectionProcessor": { - "properties": { - "sensitivity": { - "type": "string", - "description": "Enumeration that specifies the sensitivity of the motion detection processor.", - "enum": [ - "Low", - "Medium", - "High" - ], - "x-ms-enum": { - "name": "MediaGraphMotionDetectionSensitivity", - "values": [ - { - "value": "Low", - "description": "Low Sensitivity." - }, - { - "value": "Medium", - "description": "Medium Sensitivity." - }, - { - "value": "High", - "description": "High Sensitivity." 
- } - ], - "modelAsString": true - } - }, - "outputMotionRegion": { - "type": "boolean", - "description": "Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphMotionDetectionProcessor" - }, - "MediaGraphExtensionProcessorBase": { - "properties": { - "endpoint": { - "description": "Endpoint to which this processor should connect.", - "$ref": "#/definitions/MediaGraphEndpoint" - }, - "image": { - "description": "Describes the parameters of the image that is sent as input to the endpoint.", - "$ref": "#/definitions/MediaGraphImage" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphExtensionProcessorBase" - }, - "MediaGraphCognitiveServicesVisionExtension": { - "properties": {}, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - } - ], - "description": "A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. 
Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension" - }, - "MediaGraphGrpcExtension": { - "required": [ - "dataTransfer" - ], - "properties": { - "dataTransfer": { - "description": "How media should be transferred to the inferencing engine.", - "$ref": "#/definitions/MediaGraphGrpcExtensionDataTransfer" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - }, - {} - ], - "description": "A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" - }, - "MediaGraphGrpcExtensionDataTransfer": { - "required": [ - "mode" - ], - "properties": { - "sharedMemorySizeMiB": { - "type": "string", - "description": "The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specificed otherwise." - }, - "mode": { - "type": "string", - "description": "How frame data should be transmitted to the inferencing engine.", - "enum": [ - "Embedded", - "SharedMemory" - ], - "x-ms-enum": { - "name": "MediaGraphGrpcExtensionDataTransferMode", - "values": [ - { - "value": "Embedded", - "description": "Frames are transferred embedded into the gRPC messages." - }, - { - "value": "SharedMemory", - "description": "Frames are transferred through shared memory." 
- } - ], - "modelAsString": true - } - } - }, - "description": "Describes how media should be transferred to the inferencing engine.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" - }, - "MediaGraphHttpExtension": { - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - }, - {} - ], - "description": "A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpExtension" - }, - "MediaGraphImage": { - "type": "object", - "properties": { - "scale": { - "$ref": "#/definitions/MediaGraphImageScale" - }, - "format": { - "$ref": "#/definitions/MediaGraphImageFormat" - } - }, - "description": "Describes the properties of an image frame." - }, - "MediaGraphImageScale": { - "type": "object", - "properties": { - "mode": { - "type": "string", - "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", - "enum": [ - "PreserveAspectRatio", - "Pad", - "Stretch" - ], - "x-ms-enum": { - "name": "MediaGraphImageScaleMode", - "values": [ - { - "value": "PreserveAspectRatio", - "description": "Use the same aspect ratio as the input frame." - }, - { - "value": "Pad", - "description": "Center pad the input frame to match the given dimensions." - }, - { - "value": "Stretch", - "description": "Stretch input frame to match given dimensions." - } - ], - "modelAsString": true - } - }, - "width": { - "type": "string", - "description": "The desired output width of the image." - }, - "height": { - "type": "string", - "description": "The desired output height of the image." - } - }, - "description": "The scaling mode for the image." 
- }, - "MediaGraphImageFormat": { - "required": [ - "@type" - ], - "type": "object", - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Encoding settings for an image.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormat" - }, - "MediaGraphImageFormatRaw": { - "properties": { - "pixelFormat": { - "type": "string", - "description": "pixel format", - "enum": [ - "Yuv420p", - "Rgb565be", - "Rgb565le", - "Rgb555be", - "Rgb555le", - "Rgb24", - "Bgr24", - "Argb", - "Rgba", - "Abgr", - "Bgra" - ], - "x-ms-enum": { - "name": "MediaGraphImageFormatRawPixelFormat", - "values": [ - { - "value": "Yuv420p", - "description": "Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples)." - }, - { - "value": "Rgb565be", - "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian." - }, - { - "value": "Rgb565le", - "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian." - }, - { - "value": "Rgb555be", - "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined." - }, - { - "value": "Rgb555le", - "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined." - }, - { - "value": "Rgb24", - "description": "Packed RGB 8:8:8, 24bpp, RGBRGB." - }, - { - "value": "Bgr24", - "description": "Packed RGB 8:8:8, 24bpp, BGRBGR." - }, - { - "value": "Argb", - "description": "Packed ARGB 8:8:8:8, 32bpp, ARGBARGB." - }, - { - "value": "Rgba", - "description": "Packed RGBA 8:8:8:8, 32bpp, RGBARGBA." - }, - { - "value": "Abgr", - "description": "Packed ABGR 8:8:8:8, 32bpp, ABGRABGR." - }, - { - "value": "Bgra", - "description": "Packed BGRA 8:8:8:8, 32bpp, BGRABGRA." 
- } - ], - "modelAsString": true - } - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphImageFormat" - }, - {} - ], - "description": "Encoding settings for raw images.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatRaw" - }, - "MediaGraphImageFormatEncoded": { - "properties": { - "encoding": { - "type": "string", - "description": "The different encoding formats that can be used for the image.", - "default": "Jpeg", - "enum": [ - "Jpeg", - "Bmp", - "Png" - ], - "x-ms-enum": { - "name": "MediaGraphImageEncodingFormat", - "values": [ - { - "value": "Jpeg", - "description": "JPEG image format." - }, - { - "value": "Bmp", - "description": "BMP image format." - }, - { - "value": "Png", - "description": "PNG image format." - } - ], - "modelAsString": true - } - }, - "quality": { - "type": "string", - "description": "The image quality (used for JPEG only). Value must be between 0 to 100 (best quality)." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphImageFormat" - }, - {} - ], - "description": "Allowed formats for the image.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatEncoded" - }, - "MediaGraphSignalGateProcessor": { - "properties": { - "activationEvaluationWindow": { - "type": "string", - "example": "PT1.0S", - "description": "The period of time over which the gate gathers input events, before evaluating them." - }, - "activationSignalOffset": { - "type": "string", - "example": "-PT1.0S", - "description": "Signal offset once the gate is activated (can be negative). It is an offset between the time the event is received, and the timestamp of the first media sample (eg. video frame) that is allowed through by the gate." - }, - "minimumActivationTime": { - "type": "string", - "example": "PT1S", - "description": "The minimum period for which the gate remains open, in the absence of subsequent triggers (events)." 
- }, - "maximumActivationTime": { - "type": "string", - "example": "PT2S", - "description": "The maximum period for which the gate remains open, in the presence of subsequent events." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphSignalGateProcessor" - }, - "MediaGraphFrameRateFilterProcessor": { - "properties": { - "maximumFps": { - "type": "string", - "description": "Ensures that the frame rate of the video leaving this processor does not exceed this limit." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "Limits the frame rate on the input video stream based on the maximumFps property.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFrameRateFilterProcessor" - } - } -} diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py deleted file mode 100644 index 53b2dcb4ba92..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py +++ /dev/null @@ -1,79 +0,0 @@ -import asyncio -import functools -import os - -from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function -from devtools_testutils.azure_testcase import _is_autorest_v3 - -from .testcase import AppConfigTestCase - -class AsyncAppConfigTestCase(AppConfigTestCase): - def __init__(self, *args, **kwargs): - super(AppConfigTestCase, self).__init__(*args, **kwargs) - - class AsyncFakeCredential(object): - # fake async credential - async def get_token(self, *scopes, **kwargs): - return AccessToken('fake_token', 2527537086) - - async def close(self): - pass - - def create_basic_client(self, client_class, 
**kwargs): - # This is the patch for creating client using aio identity - - tenant_id = os.environ.get("AZURE_TENANT_ID", None) - client_id = os.environ.get("AZURE_CLIENT_ID", None) - secret = os.environ.get("AZURE_CLIENT_SECRET", None) - - if tenant_id and client_id and secret and self.is_live: - if _is_autorest_v3(client_class): - # Create azure-identity class using aio credential - from azure.identity.aio import ClientSecretCredential - credentials = ClientSecretCredential( - tenant_id=tenant_id, - client_id=client_id, - client_secret=secret - ) - else: - # Create msrestazure class - from msrestazure.azure_active_directory import ServicePrincipalCredentials - credentials = ServicePrincipalCredentials( - tenant=tenant_id, - client_id=client_id, - secret=secret - ) - else: - if _is_autorest_v3(client_class): - credentials = self.AsyncFakeCredential() - #credentials = self.settings.get_azure_core_credentials() - else: - credentials = self.settings.get_credentials() - - # Real client creation - # FIXME decide what is the final argument for that - # if self.is_playback(): - # kwargs.setdefault("polling_interval", 0) - if _is_autorest_v3(client_class): - kwargs.setdefault("logging_enable", True) - client = client_class( - credential=credentials, - **kwargs - ) - else: - client = client_class( - credentials=credentials, - **kwargs - ) - - if self.is_playback(): - try: - client._config.polling_interval = 0 # FIXME in azure-mgmt-core, make this a kwargs - except AttributeError: - pass - - if hasattr(client, "config"): # Autorest v2 - if self.is_playback(): - client.config.long_running_operation_timeout = 0 - client.config.enable_http_logger = True - return client diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py b/sdk/media/azure-media-lva-edge/tests/conftest.py deleted file 
mode 100644 index c36aaed14908..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/conftest.py +++ /dev/null @@ -1,25 +0,0 @@ -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# -# -------------------------------------------------------------------------- diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-lva-edge/tests/test_app_config.py deleted file mode 100644 index 5871ed8eef2f..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/test_app_config.py +++ /dev/null @@ -1 +0,0 @@ -import pytest From 6363648f2140aec6250c975a733cd75ca21ae043 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 18 Nov 2020 15:38:51 -0800 Subject: [PATCH 32/64] fixed tox errors --- .../azure-media-lva-edge/azure/__init__.py | 2 +- .../azure/media/__init__.py | 1 + .../azure/media/lva/__init__.py | 1 + .../azure/media/lva/edge/__init__.py | 25 ++++++++++--------- .../azure-media-lva-edge/swagger/README.md | 4 +-- .../tests/test_app_config.py | 5 ++++ 6 files changed, 23 insertions(+), 15 deletions(-) create mode 100644 sdk/media/azure-media-lva-edge/azure/media/__init__.py create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py create mode 100644 sdk/media/azure-media-lva-edge/tests/test_app_config.py diff --git a/sdk/media/azure-media-lva-edge/azure/__init__.py b/sdk/media/azure-media-lva-edge/azure/__init__.py index 0e40e134bdac..e7590fb185e8 100644 --- a/sdk/media/azure-media-lva-edge/azure/__init__.py +++ b/sdk/media/azure-media-lva-edge/azure/__init__.py @@ -4,4 +4,4 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------- -__path__ = __import__("pkgutil").extend_path(__path__, __name__) \ No newline at end of file +__path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-lva-edge/azure/media/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/__init__.py new file mode 100644 index 000000000000..69e3be50dac4 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py new file mode 100644 index 000000000000..69e3be50dac4 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py index 725cd6860541..2a9c3cc68e52 100644 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py @@ -1,20 +1,21 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -from azure.media.lva.edge._generated.models import MediaGraphTopologySetRequestBody, MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody +from azure.media.lva.edge._generated.models import (MediaGraphTopologySetRequestBody, +MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) def _OverrideTopologySetRequestSerialize(self): - graph_body = MediaGraphTopologySetRequestBody(name=self.graph.name) - graph_body.system_data = self.graph.system_data - graph_body.properties = self.graph.properties - - return graph_body.serialize() + graph_body = 
MediaGraphTopologySetRequestBody(name=self.graph.name) + graph_body.system_data = self.graph.system_data + graph_body.properties = self.graph.properties + + return graph_body.serialize() MediaGraphTopologySetRequest.serialize = _OverrideTopologySetRequestSerialize def _OverrideInstanceSetRequestSerialize(self): - graph_body = MediaGraphInstanceSetRequestBody(name=self.instance.name) - graph_body.system_data = self.instance.system_data - graph_body.properties = self.instance.properties - - return graph_body.serialize() + graph_body = MediaGraphInstanceSetRequestBody(name=self.instance.name) + graph_body.system_data = self.instance.system_data + graph_body.properties = self.instance.properties + + return graph_body.serialize() -MediaGraphInstanceSetRequest.serialize = _OverrideInstanceSetRequestSerialize \ No newline at end of file +MediaGraphInstanceSetRequest.serialize = _OverrideInstanceSetRequestSerialize diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index e80c97ff0f3c..ff8338377dc3 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -3,13 +3,13 @@ > see https://aka.ms/autorest -### Generation +## Generation ```ps cd autorest --v3 --python README.md ``` -### Settings +## Settings ```yaml require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-lva-edge/tests/test_app_config.py new file mode 100644 index 000000000000..57f0ccfa146f --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/test_app_config.py @@ -0,0 +1,5 @@ +import pytest + +class TestAppConfig(): + def test_something(self): + assert 1 \ No newline at end of file From 0507a37ced9c7b2f5eebd01b7e21357b497a5da6 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 18 Nov 2020 16:04:26 -0800 Subject: 
[PATCH 33/64] modifying readme to pass tests --- sdk/media/azure-media-lva-edge/README.md | 6 +++++- sdk/media/azure-media-lva-edge/swagger/README.md | 9 ++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index c5012d4038c9..42f3c2d52227 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -6,7 +6,7 @@ Modern programs, especially programs running in a cloud, generally have many com Use the client library for App Configuration to create and manage application configuration settings. -## Prerequisites +## Getting started * Python 2.7, or 3.5 or later is required to use this package. * You need an [Azure subscription][azure_sub], and a [Configuration Store][configuration_store] to use this package. @@ -18,8 +18,12 @@ After that, create the Configuration Store: ```Powershell az appconfig create --name --resource-group --location eastus ``` +## Key Concepts +## Examples +## Troubleshooting +## Next Steps ## Contributing This project welcomes contributions and suggestions. 
Most contributions require diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index ff8338377dc3..9bd11368b134 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -3,13 +3,20 @@ > see https://aka.ms/autorest -## Generation +## Getting started ```ps cd autorest --v3 --python README.md ``` +## Key Concepts +## Examples + +## Troubleshooting + +## Next Steps ## Settings + ```yaml require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated From d197f2010279db8ecf59609447ffb9377bb0e595 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 19 Nov 2020 14:50:23 -0800 Subject: [PATCH 34/64] fixed missing tests error --- sdk/media/azure-media-lva-edge/MANIFEST.in | 1 + .../azure-media-lva-edge/dev_requirements.txt | 2 +- .../azure-media-lva-edge/tests/conftest.py | 25 +++++++++++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 sdk/media/azure-media-lva-edge/tests/conftest.py diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-lva-edge/MANIFEST.in index 4a340e3b7f85..7ebdd947f8ff 100644 --- a/sdk/media/azure-media-lva-edge/MANIFEST.in +++ b/sdk/media/azure-media-lva-edge/MANIFEST.in @@ -1,3 +1,4 @@ +recursive-include tests *.py include *.md include azure/__init__.py recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt index 08bcfb306787..1d971eca1249 100644 --- a/sdk/media/azure-media-lva-edge/dev_requirements.txt +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -5,7 +5,7 @@ aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' msrest>=0.6.10 -pytest==5.4.2 +pytest>=4.6.9 tox>=3.20.0 tox-monorepo>=0.1.2 pytest-asyncio==0.12.0 diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py 
b/sdk/media/azure-media-lva-edge/tests/conftest.py new file mode 100644 index 000000000000..c36aaed14908 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/conftest.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +# -------------------------------------------------------------------------- From 92f256ae1952c16cee7c800d2ebdd2a5e1f9be83 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 19 Nov 2020 15:06:09 -0800 Subject: [PATCH 35/64] fixing dev requirments --- sdk/media/azure-media-lva-edge/README.md | 6 ++++-- .../azure-media-lva-edge/dev_requirements.txt | 1 - sdk/media/azure-media-lva-edge/swagger/README.md | 14 ++++++++------ 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 42f3c2d52227..1d7f3a425d64 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -19,11 +19,13 @@ After that, create the Configuration Store: az appconfig create --name --resource-group --location eastus ``` ## Key Concepts +sample ## Examples - +sample ## Troubleshooting - +sample ## Next Steps +sample ## Contributing This project welcomes contributions and suggestions. 
Most contributions require diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt index 1d971eca1249..cca01aec8af4 100644 --- a/sdk/media/azure-media-lva-edge/dev_requirements.txt +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -5,7 +5,6 @@ aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' msrest>=0.6.10 -pytest>=4.6.9 tox>=3.20.0 tox-monorepo>=0.1.2 pytest-asyncio==0.12.0 diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index 9bd11368b134..27ae17c26693 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -1,7 +1,5 @@ # Azure Queue Storage for Python - -> see https://aka.ms/autorest - +see `https://aka.ms/autorest` ## Getting started ```ps @@ -9,12 +7,16 @@ cd autorest --v3 --python README.md ``` ## Key Concepts - +sample ## Examples - +sample ## Troubleshooting - +sample ## Next Steps +sample + +## Contributing +sample ## Settings ```yaml From 31826c385dea5c9f6bea90242348c675b6b57998 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 19 Nov 2020 15:27:35 -0800 Subject: [PATCH 36/64] fixing readme and dev requirment --- sdk/media/azure-media-lva-edge/README.md | 2 ++ sdk/media/azure-media-lva-edge/dev_requirements.txt | 3 +-- sdk/media/azure-media-lva-edge/swagger/README.md | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 1d7f3a425d64..0b3e33a1f15e 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -19,12 +19,14 @@ After that, create the Configuration Store: az appconfig create --name --resource-group --location eastus ``` ## Key Concepts + sample ## Examples sample ## Troubleshooting sample ## Next Steps + sample ## Contributing diff --git 
a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt index cca01aec8af4..c3cf063e6b31 100644 --- a/sdk/media/azure-media-lva-edge/dev_requirements.txt +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -6,5 +6,4 @@ aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' msrest>=0.6.10 tox>=3.20.0 -tox-monorepo>=0.1.2 -pytest-asyncio==0.12.0 +tox-monorepo>=0.1.2 \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md index 27ae17c26693..de1a5c4080de 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/README.md @@ -1,4 +1,5 @@ # Azure Queue Storage for Python + see `https://aka.ms/autorest` ## Getting started @@ -7,12 +8,14 @@ cd autorest --v3 --python README.md ``` ## Key Concepts + sample ## Examples sample ## Troubleshooting sample ## Next Steps + sample ## Contributing From a3df331d53a8fedd14c9723338440abdd90a9742 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 20 Nov 2020 12:00:14 -0800 Subject: [PATCH 37/64] renaming readme file --- .../swagger/{README.md => autorest.md} | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) rename sdk/media/azure-media-lva-edge/swagger/{README.md => autorest.md} (79%) diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/autorest.md similarity index 79% rename from sdk/media/azure-media-lva-edge/swagger/README.md rename to sdk/media/azure-media-lva-edge/swagger/autorest.md index de1a5c4080de..1dcac1fff73e 100644 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ b/sdk/media/azure-media-lva-edge/swagger/autorest.md @@ -5,21 +5,8 @@ see `https://aka.ms/autorest` ## Getting started ```ps cd -autorest --v3 --python README.md +autorest --v3 --python autorest.md ``` -## Key Concepts - -sample -## Examples -sample -## Troubleshooting -sample 
-## Next Steps - -sample - -## Contributing -sample ## Settings ```yaml From 9b388478b87de9e3a15062f2f970ea7fc9d9d923 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 20 Nov 2020 12:03:29 -0800 Subject: [PATCH 38/64] fixed casing in read me file --- sdk/media/azure-media-lva-edge/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 0b3e33a1f15e..11728924e8a7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -18,14 +18,14 @@ After that, create the Configuration Store: ```Powershell az appconfig create --name --resource-group --location eastus ``` -## Key Concepts +## Key concepts sample ## Examples sample ## Troubleshooting sample -## Next Steps +## Next steps sample ## Contributing From a9cbd5ec6f36971aac57812b698ecf40dfde61ae Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 20 Nov 2020 12:25:12 -0800 Subject: [PATCH 39/64] fixing dependency requirments --- sdk/media/azure-media-lva-edge/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-lva-edge/setup.py index d4a8c12edcc6..a4bfc61f9c6f 100644 --- a/sdk/media/azure-media-lva-edge/setup.py +++ b/sdk/media/azure-media-lva-edge/setup.py @@ -87,8 +87,8 @@ zip_safe=False, packages=find_packages(exclude=exclude_packages), install_requires=[ - "msrest>=0.6.10", - "azure-core<2.0.0,>=1.0.0", + "msrest>=0.5.0", + "azure-core<2.0.0,>=1.2.2", ], extras_require={ ":python_version<'3.0'": ['azure-nspkg'], From 88913a7260eef0f6ac7bedb9ce503f81a32f7c0f Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 08:23:23 -0800 Subject: [PATCH 40/64] updating README file --- sdk/media/azure-media-lva-edge/README.md | 71 +++++++++++++++---- .../azure-media-lva-edge/docs/DevTips.md | 40 +++++++++++ .../azure-media-lva-edge/swagger/autorest.md | 2 +- 3 files changed, 97 
insertions(+), 16 deletions(-) create mode 100644 sdk/media/azure-media-lva-edge/docs/DevTips.md diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 11728924e8a7..d2f2534482f7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -1,33 +1,53 @@ -# Azure App Configuration client library for Python SDK Training +# Azure Live Video Analytics for IoT Edge client library for Python -Azure App Configuration is a managed service that helps developers centralize their application configurations simply and securely. +Live Video Analytics on IoT Edge provides a platform to build intelligent video applications that span the edge and the cloud. The platform offers the capability to capture, record, and analyze live video along with publishing the results, video and video analytics, to Azure services in the cloud or the edge. It is designed to be an extensible platform, enabling you to connect different video analysis edge modules (such as Cognitive services containers, custom edge modules built by you with open-source machine learning models or custom models trained with your own data) to it and use them to analyze live video without worrying about the complexity of building and running a live video pipeline. -Modern programs, especially programs running in a cloud, generally have many components that are distributed in nature. Spreading configuration settings across these components can lead to hard-to-troubleshoot errors during an application deployment. Use App Configuration to securely store all the settings for your application in one place. +Use the client library for Live Video Analytics on IoT Edge to: -Use the client library for App Configuration to create and manage application configuration settings. 
+- simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) +- programatically construct media graph topologies and instances -## Getting started +[Package (PyPi)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] -* Python 2.7, or 3.5 or later is required to use this package. -* You need an [Azure subscription][azure_sub], and a [Configuration Store][configuration_store] to use this package. +## Getting started -To create a Configuration Store, you can use the Azure Portal or [Azure CLI][azure_cli]. +### Install the package -After that, create the Configuration Store: +Install the Live Video Analytics client library for Python with pip: -```Powershell -az appconfig create --name --resource-group --location eastus +```bash +pip install azure-lva-edge ``` +### Prerequisites + +* Python 2.7, or 3.5 or later is required to use this package. +* You need an [Azure subscription][azure_sub], and a [IOT device connection string][iot_device_connection_string] to use this package. + + ## Key concepts -sample +### Graph Topology vs Graph Instance +A graph topology is essentially the blueprint or template of a graph. It defines the parameters of the graph using placeholders as values for them. A graph instance references a graph topology and specifies the parameters. This way you are able to have multiple graph instances referencing the same topology but with different values for parameters. For more information please visit [Media graph topologies and instances][doc_media_graph] + +### CloudToDeviceMethod + +The `CloudToDeviceMethod` is part of the azure-iot-hub sdk. This method allows you to communicate one way notifications to a device in your iot hub. In our case we want to communicate various graph methods such as `MediaGraphTopologySetRequest` and `MediaGraphTopologyGetRequest`. 
To use `CloudToDeviceMethod` you need to pass in two parameters: `method_name` and `payload`. `method_name` should be the name of the media graph request you are sending. Each media graph request has a property called `method_name`. For example, `MediaGraphTopologySetRequest.method_name`. For the second parameter `payload` send the entire serialization of the media graph request. For example, `MediaGraphTopologySetRequest.serialize()` + ## Examples -sample + +[Samples][samples] + ## Troubleshooting -sample + +- When sending a method request using the IoT Hub's `CloudToDeviceMethod` remember to not type in the method request name directly. Instead use `[MethodRequestName.method_name]` +- Make sure to serialize the entire method request before passing it to `CloudToDeviceMethod` + ## Next steps -sample +- [Samples][samples] +- [Azure IoT Device SDK][iot-device-sdk] +- [Azure IoTHub Service SDK][iot-hub-sdk] + ## Contributing This project welcomes contributions and suggestions. Most contributions require @@ -44,3 +64,24 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, see the Code of Conduct FAQ or contact opencode@microsoft.com with any additional questions or comments. 
+ + +[azure_cli]: https://docs.microsoft.com/cli/azure +[azure_sub]: https://azure.microsoft.com/free/ + +[cla]: https://cla.microsoft.com +[code_of_conduct]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com + +[package]: TODO +[source]: TODO://link-to-path-in-the-SDK-repo +[samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python + +[doc_direct_methods]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/direct-methods +[doc_media_graph]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/media-graph-concept#media-graph-topologies-and-instances +[doc_product]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/ + +[iot-device-sdk]: https://pypi.org/project/azure-iot-device/ +[iot-hub-sdk]: https://pypi.org/project/azure-iot-hub/ +[iot_device_connection_string]: https://docs.microsoft.com/en-us/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/docs/DevTips.md b/sdk/media/azure-media-lva-edge/docs/DevTips.md new file mode 100644 index 000000000000..f6dbf9e1277c --- /dev/null +++ b/sdk/media/azure-media-lva-edge/docs/DevTips.md @@ -0,0 +1,40 @@ +## How to update the lva sdk + +1. Clone the latest swagger onto your local machine +2. Replace the `require` field inside of `autorest.md` to point to your local swagger file +3. Generate the sdk using the autorest command which can be found inside the `autorest.md` file +4. Add any customization functions inside of `sdk\media\azure-media-lva-edge\azure\media\lva\edge\__init__.py`. Make sure the customization functions are outside of the `_generated` folder. +5. Update the README file and Changelog with the latest version number +6. 
Submit a PR + +## Running tox locally + +Tox is the testing and virtual environment management tool that is used to verify our sdk will be installed correctly with different Python versions and interpreters. To run tox follow these instructions + +``` +pip install tox tox-monorepo +cd path/to/target/folder +tox -c eng/tox/tox.ini +``` +To run a specific tox command from your directory use the following commands: +```bash +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sphinx +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e lint +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e mypy +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e whl +azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sdist +``` +A quick description of the five commands above: +* sphinx: documentation generation using the inline comments written in our code +* lint: runs pylint to make sure our code adheres to the style guidance +* mypy: runs the mypy static type checker for Python to make sure that our types are valid +* whl: creates a whl package for installing our package +* sdist: creates a zipped distribution of our files that the end user could install with pip + + +### Troubleshooting tox errors + +- Tox will complain if there are no tests. Add a dummy test in case you need to bypass this +- Make sure there is an `__init__.py` file inside of every directory inside of `azure` (Example: `azure/media` should have an __init__.py file) +- Follow the ReadMe guidelines outlined here: https://review.docs.microsoft.com/en-us/help/contribute-ref/contribute-ref-how-to-document-sdk?branch=master#readme. ReadMe titles are case SENSITIVE and use sentence casing. +- Make sure MANIFEST.in includes all required folders. 
(Most likely the required folders will be tests, samples, and the generated folder) diff --git a/sdk/media/azure-media-lva-edge/swagger/autorest.md b/sdk/media/azure-media-lva-edge/swagger/autorest.md index 1dcac1fff73e..48618fb331ed 100644 --- a/sdk/media/azure-media-lva-edge/swagger/autorest.md +++ b/sdk/media/azure-media-lva-edge/swagger/autorest.md @@ -5,7 +5,7 @@ see `https://aka.ms/autorest` ## Getting started ```ps cd -autorest --v3 --python autorest.md +autorest --v3 --python ``` ## Settings From a7e4e395408d2d81390500c58b5e80f423049468 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 08:48:49 -0800 Subject: [PATCH 41/64] fixed broken links --- sdk/media/azure-media-lva-edge/README.md | 4 ++-- sdk/media/azure-media-lva-edge/docs/DevTips.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index d2f2534482f7..9380cdea6fe7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -74,7 +74,7 @@ additional questions or comments. [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ [coc_contact]: mailto:opencode@microsoft.com -[package]: TODO +[package]: placeholder [source]: TODO://link-to-path-in-the-SDK-repo [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python @@ -84,4 +84,4 @@ additional questions or comments. 
[iot-device-sdk]: https://pypi.org/project/azure-iot-device/ [iot-hub-sdk]: https://pypi.org/project/azure-iot-hub/ -[iot_device_connection_string]: https://docs.microsoft.com/en-us/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file +[iot_device_connection_string]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/docs/DevTips.md b/sdk/media/azure-media-lva-edge/docs/DevTips.md index f6dbf9e1277c..b649d500d873 100644 --- a/sdk/media/azure-media-lva-edge/docs/DevTips.md +++ b/sdk/media/azure-media-lva-edge/docs/DevTips.md @@ -36,5 +36,5 @@ A quick description of the five commands above: - Tox will complain if there are no tests. Add a dummy test in case you need to bypass this - Make sure there is an `__init__.py` file inside of every directory inside of `azure` (Example: `azure/media` should have an __init__.py file) -- Follow the ReadMe guidelines outlined here: https://review.docs.microsoft.com/en-us/help/contribute-ref/contribute-ref-how-to-document-sdk?branch=master#readme. ReadMe titles are case SENSITIVE and use sentence casing. +- Follow the ReadMe guidelines outlined here: https://review.docs.microsoft.com/help/contribute-ref/contribute-ref-how-to-document-sdk?branch=master#readme. ReadMe titles are case SENSITIVE and use sentence casing. - Make sure MANIFEST.in includes all required folders. 
(Most likely the required folders will be tests, samples, and the generated folder) From 4249244518c1b6a6ae0148d539840e1fc4e44213 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 09:15:05 -0800 Subject: [PATCH 42/64] fixed broken link --- sdk/media/azure-media-lva-edge/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 9380cdea6fe7..bf4d7cba4c86 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -74,7 +74,7 @@ additional questions or comments. [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ [coc_contact]: mailto:opencode@microsoft.com -[package]: placeholder +[package]: TODO://link-to-published-package [source]: TODO://link-to-path-in-the-SDK-repo [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python From 14a5809a77126a362d37e4d50928e82b74c0fcc8 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 11:56:41 -0800 Subject: [PATCH 43/64] updated readme with examples --- sdk/media/azure-media-lva-edge/README.md | 48 +++++++++++++++++-- .../samples/sample_lva.py | 4 +- 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index bf4d7cba4c86..814c44507c97 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -4,8 +4,8 @@ Live Video Analytics on IoT Edge provides a platform to build intelligent video Use the client library for Live Video Analytics on IoT Edge to: -- simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) -- programatically construct media graph topologies and instances +- Simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) +- Programatically construct media graph topologies and instances [Package 
(PyPi)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] @@ -23,7 +23,8 @@ pip install azure-lva-edge * Python 2.7, or 3.5 or later is required to use this package. * You need an [Azure subscription][azure_sub], and a [IOT device connection string][iot_device_connection_string] to use this package. - +### Creating a graph topology and making requests +Please visit the [Examples](#examples) for starter code ## Key concepts ### Graph Topology vs Graph Instance @@ -35,7 +36,46 @@ The `CloudToDeviceMethod` is part of the azure-iot-hub sdk. This method allows y ## Examples -[Samples][samples] +### Creating a graph topology +To create a graph topology you need to define parameters, sources, and sinks. +``` +#Parameters +user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") +password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") +url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") + +#Source and Sink +source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) +node = MediaGraphNodeInput(node_name="rtspSource") +sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/") + +graph_properties = MediaGraphTopologyProperties(parameters=[user_name_param, password_param, url_param], sources=[source], sinks=[sink], description="Continuous video recording to an Azure Media Services Asset") + +graph_topology = 
MediaGraphTopology(name=graph_topology_name,properties=graph_properties) + +``` + +### Creating a graph instance +To create a graph instance, you need to have an existing graph topology. +``` +url_param = MediaGraphParameterDefinition(name="rtspUrl", value=graph_url) +graph_instance_properties = MediaGraphInstanceProperties(description="Sample graph description", topology_name=graph_topology_name, parameters=[url_param]) + +graph_instance = MediaGraphInstance(name=graph_instance_name, properties=graph_instance_properties) + +``` + +### Invoking a graph method request +To invoke a graph method on your device you need to first define the request using the lva sdk. Then send that method request using the iot sdk's `CloudToDeviceMethod` +``` +set_method_request = MediaGraphTopologySetRequest(graph=graph_topology) +direct_method = CloudToDeviceMethod(method_name=set_method_request.method_name, payload=set_method_request.serialize()) +registry_manager = IoTHubRegistryManager(connection_string) + +registry_manager.invoke_device_module_method(device_id, module_id, direct_method) +``` + +For more samples please visit [Samples][samples].
## Troubleshooting diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-lva-edge/samples/sample_lva.py index 9b5e91818af6..46a5d64d3c39 100644 --- a/sdk/media/azure-media-lva-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-lva-edge/samples/sample_lva.py @@ -11,7 +11,7 @@ connection_string = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING") graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" - +graph_url = '"rtsp://sample-url-from-camera"' def build_graph_topology(): graph_properties = MediaGraphTopologyProperties() @@ -31,7 +31,7 @@ def build_graph_topology(): return graph def build_graph_instance(): - url_param = MediaGraphParameterDefinition(name="rtspUrl", value="rtsp://rtspsim:554/media/camera-300s.mkv") + url_param = MediaGraphParameterDefinition(name="rtspUrl", value=graph_url) graph_instance_properties = MediaGraphInstanceProperties(description="Sample graph description", topology_name=graph_topology_name, parameters=[url_param]) graph_instance = MediaGraphInstance(name=graph_instance_name, properties=graph_instance_properties) From cd0cc32617899cee2614542473167aba372a3360 Mon Sep 17 00:00:00 2001 From: hivyas Date: Wed, 2 Dec 2020 12:55:00 -0800 Subject: [PATCH 44/64] updating readme based on comments --- sdk/media/azure-media-lva-edge/README.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md index 814c44507c97..278fb71a1cb7 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-lva-edge/README.md @@ -21,18 +21,22 @@ pip install azure-lva-edge ### Prerequisites * Python 2.7, or 3.5 or later is required to use this package. -* You need an [Azure subscription][azure_sub], and a [IOT device connection string][iot_device_connection_string] to use this package.
+* You need an active [Azure subscription][azure_sub], and a [IoT device connection string][iot_device_connection_string] to use this package. ### Creating a graph topology and making requests Please visit the [Examples](#examples) for starter code ## Key concepts -### Graph Topology vs Graph Instance -A graph topology is essentially the blueprint or template of a graph. It defines the parameters of the graph using placeholders as values for them. A graph instance references a graph topology and specifies the parameters. This way you are able to have multiple graph instances referencing the same topology but with different values for parameters. For more information please visit [Media graph topologies and instances][doc_media_graph] +### MediaGraph Topology vs MediaGraph Instance +A _graph topology_ is a blueprint or template of a graph. It defines the parameters of the graph using placeholders as values for them. A _graph instance_ references a graph topology and specifies the parameters. This way you are able to have multiple graph instances referencing the same topology but with different values for parameters. For more information please visit [Media graph topologies and instances][doc_media_graph] ### CloudToDeviceMethod -The `CloudToDeviceMethod` is part of the azure-iot-hub sdk. This method allows you to communicate one way notifications to a device in your iot hub. In our case we want to communicate various graph methods such as `MediaGraphTopologySetRequest` and `MediaGraphTopologyGetRequest`. To use `CloudToDeviceMethod` you need to pass in two parameters: `method_name` and `payload`. `method_name` should be the name of the media graph request you are sending. Each media graph request has a property called `method_name`. For example, `MediaGraphTopologySetRequest.method_name`. For the second parameter `payload` send the entire serialization of the media graph request.
For example, `MediaGraphTopologySetRequest.serialize()` +The `CloudToDeviceMethod` is part of the [azure-iot-hub SDK][iot-hub-sdk]. This method allows you to communicate one way notifications to a device in your IoT hub. In our case, we want to communicate various graph methods such as `MediaGraphTopologySetRequest` and `MediaGraphTopologyGetRequest`. To use `CloudToDeviceMethod` you need to pass in two parameters: `method_name` and `payload`. + +The first parameter, `method_name`, is the name of the media graph request you are sending. Make sure to use each method's predefined `method_name` property. For example, `MediaGraphTopologySetRequest.method_name`. + +The second parameter, `payload`, sends the entire serialization of the media graph request. For example, `MediaGraphTopologySetRequest.serialize()` ## Examples @@ -42,7 +46,7 @@ To create a graph topology you need to define parameters, sources, and sinks. #Parameters user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") -url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") +url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://rtspsim:554/media/camera-300s.mkv") #Source and Sink source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) @@ -75,7 +79,7 @@ registry_manager = IoTHubRegistryManager(connection_string) registry_manager.invoke_device_module_method(device_id, module_id, direct_method) ``` -For more samples please visit [Samples][samples]. +To try different media graph topologies with the SDK, please see the official [Samples][samples].
## Troubleshooting @@ -95,6 +99,8 @@ you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. +If you encounter any issues, please open an issue on our [Github][github-page-issues]. + When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only @@ -115,7 +121,7 @@ additional questions or comments. [coc_contact]: mailto:opencode@microsoft.com [package]: TODO://link-to-published-package -[source]: TODO://link-to-path-in-the-SDK-repo +[source]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-media-lva-edge [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python [doc_direct_methods]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/direct-methods @@ -124,4 +130,6 @@ additional questions or comments. 
[iot-device-sdk]: https://pypi.org/project/azure-iot-device/ [iot-hub-sdk]: https://pypi.org/project/azure-iot-hub/ -[iot_device_connection_string]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart \ No newline at end of file +[iot_device_connection_string]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/get-started-detect-motion-emit-events-quickstart + +[github-page-issues]: https://github.com/Azure/azure-sdk-for-python/issues \ No newline at end of file From 5314dd96eb5a8e5d711602a53147ea9d77536a20 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 08:50:08 -0800 Subject: [PATCH 45/64] changing package name --- .../CHANGELOG.md | 0 .../MANIFEST.in | 0 .../README.md | 2 +- .../azure/__init__.py | 0 .../azure/media/__init__.py | 0 .../media/livevideoanalytics}/__init__.py | 0 .../livevideoanalytics}/edge/__init__.py | 0 .../edge/_generated/__init__.py | 0 .../edge/_generated/_version.py | 0 .../edge/_generated/models/__init__.py | 0 ...r_live_video_analyticson_io_tedge_enums.py | 0 .../edge/_generated/models/_models.py | 0 .../edge/_generated/models/_models_py3.py | 0 .../edge/_generated/py.typed | 0 .../livevideoanalytics}/edge/_version.py | 0 .../media/lva/edge/_generated/__init__.py | 1 + .../media/lva/edge/_generated/_version.py | 9 + .../lva/edge/_generated/models/__init__.py | 199 ++ ...r_live_video_analyticson_io_tedge_enums.py | 108 + .../lva/edge/_generated/models/_models.py | 2008 +++++++++++++++ .../lva/edge/_generated/models/_models_py3.py | 2185 +++++++++++++++++ .../azure/media/lva/edge/_generated/py.typed | 1 + .../dev_requirements.txt | 0 .../docs/DevTips.md | 10 +- .../samples/sample_conditional_async.py | 0 .../samples/sample_hello_world.py | 0 .../samples/sample_lva.py | 0 .../sdk_packaging.toml | 0 .../setup.cfg | 0 .../setup.py | 2 +- .../swagger/autorest.md | 2 +- .../swagger/commandOutput.txt | 0 .../tests/conftest.py | 0 
.../tests/test_app_config.py | 0 34 files changed, 4519 insertions(+), 8 deletions(-) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/CHANGELOG.md (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/MANIFEST.in (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/README.md (99%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/azure/__init__.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/azure/media/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/_version.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/__init__.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/_models.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/models/_models_py3.py (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => 
azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_generated/py.typed (100%) rename sdk/media/{azure-media-lva-edge/azure/media/lva => azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics}/edge/_version.py (100%) create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py create mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/dev_requirements.txt (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/docs/DevTips.md (80%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/samples/sample_conditional_async.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/samples/sample_hello_world.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/samples/sample_lva.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/sdk_packaging.toml (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/setup.cfg (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/setup.py (98%) rename sdk/media/{azure-media-lva-edge => 
azure-media-livevideoanalytics-edge}/swagger/autorest.md (78%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/swagger/commandOutput.txt (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/tests/conftest.py (100%) rename sdk/media/{azure-media-lva-edge => azure-media-livevideoanalytics-edge}/tests/test_app_config.py (100%) diff --git a/sdk/media/azure-media-lva-edge/CHANGELOG.md b/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md similarity index 100% rename from sdk/media/azure-media-lva-edge/CHANGELOG.md rename to sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in similarity index 100% rename from sdk/media/azure-media-lva-edge/MANIFEST.in rename to sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md similarity index 99% rename from sdk/media/azure-media-lva-edge/README.md rename to sdk/media/azure-media-livevideoanalytics-edge/README.md index 278fb71a1cb7..d2467be44b9e 100644 --- a/sdk/media/azure-media-lva-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -16,7 +16,7 @@ Use the client library for Live Video Analytics on IoT Edge to: Install the Live Video Analytics client library for Python with pip: ```bash -pip install azure-lva-edge +pip install azure-media-livevideoanalytics--edge ``` ### Prerequisites diff --git a/sdk/media/azure-media-lva-edge/azure/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/__init__.py 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py similarity index 100% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py new file mode 100644 index 000000000000..5960c353a898 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py new file mode 100644 index 000000000000..31ed98425268 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +VERSION = "1.0" diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py new file mode 100644 index 000000000000..2e389ab8ef9d --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py @@ -0,0 +1,199 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +try: + from ._models_py3 import ItemNonSetRequestBase + from ._models_py3 import MediaGraphAssetSink + from ._models_py3 import MediaGraphCertificateSource + from ._models_py3 import MediaGraphCognitiveServicesVisionExtension + from ._models_py3 import MediaGraphCredentials + from ._models_py3 import MediaGraphEndpoint + from ._models_py3 import MediaGraphExtensionProcessorBase + from ._models_py3 import MediaGraphFileSink + from ._models_py3 import MediaGraphFrameRateFilterProcessor + from ._models_py3 import MediaGraphGrpcExtension + from ._models_py3 import MediaGraphGrpcExtensionDataTransfer + from ._models_py3 import MediaGraphHttpExtension + from ._models_py3 import MediaGraphHttpHeaderCredentials + from ._models_py3 import MediaGraphImage + from ._models_py3 import MediaGraphImageFormat + from ._models_py3 import MediaGraphImageFormatEncoded + from ._models_py3 import MediaGraphImageFormatRaw + from ._models_py3 import MediaGraphImageScale + from ._models_py3 import MediaGraphInstance + from ._models_py3 
import MediaGraphInstanceActivateRequest + from ._models_py3 import MediaGraphInstanceCollection + from ._models_py3 import MediaGraphInstanceDeActivateRequest + from ._models_py3 import MediaGraphInstanceDeleteRequest + from ._models_py3 import MediaGraphInstanceGetRequest + from ._models_py3 import MediaGraphInstanceListRequest + from ._models_py3 import MediaGraphInstanceProperties + from ._models_py3 import MediaGraphInstanceSetRequest + from ._models_py3 import MediaGraphInstanceSetRequestBody + from ._models_py3 import MediaGraphIoTHubMessageSink + from ._models_py3 import MediaGraphIoTHubMessageSource + from ._models_py3 import MediaGraphMotionDetectionProcessor + from ._models_py3 import MediaGraphNodeInput + from ._models_py3 import MediaGraphOutputSelector + from ._models_py3 import MediaGraphParameterDeclaration + from ._models_py3 import MediaGraphParameterDefinition + from ._models_py3 import MediaGraphPemCertificateList + from ._models_py3 import MediaGraphProcessor + from ._models_py3 import MediaGraphRtspSource + from ._models_py3 import MediaGraphSignalGateProcessor + from ._models_py3 import MediaGraphSink + from ._models_py3 import MediaGraphSource + from ._models_py3 import MediaGraphSystemData + from ._models_py3 import MediaGraphTlsEndpoint + from ._models_py3 import MediaGraphTlsValidationOptions + from ._models_py3 import MediaGraphTopology + from ._models_py3 import MediaGraphTopologyCollection + from ._models_py3 import MediaGraphTopologyDeleteRequest + from ._models_py3 import MediaGraphTopologyGetRequest + from ._models_py3 import MediaGraphTopologyListRequest + from ._models_py3 import MediaGraphTopologyProperties + from ._models_py3 import MediaGraphTopologySetRequest + from ._models_py3 import MediaGraphTopologySetRequestBody + from ._models_py3 import MediaGraphUnsecuredEndpoint + from ._models_py3 import MediaGraphUsernamePasswordCredentials + from ._models_py3 import OperationBase +except (SyntaxError, ImportError): + from ._models 
import ItemNonSetRequestBase # type: ignore + from ._models import MediaGraphAssetSink # type: ignore + from ._models import MediaGraphCertificateSource # type: ignore + from ._models import MediaGraphCognitiveServicesVisionExtension # type: ignore + from ._models import MediaGraphCredentials # type: ignore + from ._models import MediaGraphEndpoint # type: ignore + from ._models import MediaGraphExtensionProcessorBase # type: ignore + from ._models import MediaGraphFileSink # type: ignore + from ._models import MediaGraphFrameRateFilterProcessor # type: ignore + from ._models import MediaGraphGrpcExtension # type: ignore + from ._models import MediaGraphGrpcExtensionDataTransfer # type: ignore + from ._models import MediaGraphHttpExtension # type: ignore + from ._models import MediaGraphHttpHeaderCredentials # type: ignore + from ._models import MediaGraphImage # type: ignore + from ._models import MediaGraphImageFormat # type: ignore + from ._models import MediaGraphImageFormatEncoded # type: ignore + from ._models import MediaGraphImageFormatRaw # type: ignore + from ._models import MediaGraphImageScale # type: ignore + from ._models import MediaGraphInstance # type: ignore + from ._models import MediaGraphInstanceActivateRequest # type: ignore + from ._models import MediaGraphInstanceCollection # type: ignore + from ._models import MediaGraphInstanceDeActivateRequest # type: ignore + from ._models import MediaGraphInstanceDeleteRequest # type: ignore + from ._models import MediaGraphInstanceGetRequest # type: ignore + from ._models import MediaGraphInstanceListRequest # type: ignore + from ._models import MediaGraphInstanceProperties # type: ignore + from ._models import MediaGraphInstanceSetRequest # type: ignore + from ._models import MediaGraphInstanceSetRequestBody # type: ignore + from ._models import MediaGraphIoTHubMessageSink # type: ignore + from ._models import MediaGraphIoTHubMessageSource # type: ignore + from ._models import 
MediaGraphMotionDetectionProcessor # type: ignore + from ._models import MediaGraphNodeInput # type: ignore + from ._models import MediaGraphOutputSelector # type: ignore + from ._models import MediaGraphParameterDeclaration # type: ignore + from ._models import MediaGraphParameterDefinition # type: ignore + from ._models import MediaGraphPemCertificateList # type: ignore + from ._models import MediaGraphProcessor # type: ignore + from ._models import MediaGraphRtspSource # type: ignore + from ._models import MediaGraphSignalGateProcessor # type: ignore + from ._models import MediaGraphSink # type: ignore + from ._models import MediaGraphSource # type: ignore + from ._models import MediaGraphSystemData # type: ignore + from ._models import MediaGraphTlsEndpoint # type: ignore + from ._models import MediaGraphTlsValidationOptions # type: ignore + from ._models import MediaGraphTopology # type: ignore + from ._models import MediaGraphTopologyCollection # type: ignore + from ._models import MediaGraphTopologyDeleteRequest # type: ignore + from ._models import MediaGraphTopologyGetRequest # type: ignore + from ._models import MediaGraphTopologyListRequest # type: ignore + from ._models import MediaGraphTopologyProperties # type: ignore + from ._models import MediaGraphTopologySetRequest # type: ignore + from ._models import MediaGraphTopologySetRequestBody # type: ignore + from ._models import MediaGraphUnsecuredEndpoint # type: ignore + from ._models import MediaGraphUsernamePasswordCredentials # type: ignore + from ._models import OperationBase # type: ignore + +from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( + MediaGraphGrpcExtensionDataTransferMode, + MediaGraphImageEncodingFormat, + MediaGraphImageFormatRawPixelFormat, + MediaGraphImageScaleMode, + MediaGraphInstanceState, + MediaGraphMotionDetectionSensitivity, + MediaGraphOutputSelectorOperator, + MediaGraphParameterType, + MediaGraphRtspTransport, +) + +__all__ = [ + 
'ItemNonSetRequestBase', + 'MediaGraphAssetSink', + 'MediaGraphCertificateSource', + 'MediaGraphCognitiveServicesVisionExtension', + 'MediaGraphCredentials', + 'MediaGraphEndpoint', + 'MediaGraphExtensionProcessorBase', + 'MediaGraphFileSink', + 'MediaGraphFrameRateFilterProcessor', + 'MediaGraphGrpcExtension', + 'MediaGraphGrpcExtensionDataTransfer', + 'MediaGraphHttpExtension', + 'MediaGraphHttpHeaderCredentials', + 'MediaGraphImage', + 'MediaGraphImageFormat', + 'MediaGraphImageFormatEncoded', + 'MediaGraphImageFormatRaw', + 'MediaGraphImageScale', + 'MediaGraphInstance', + 'MediaGraphInstanceActivateRequest', + 'MediaGraphInstanceCollection', + 'MediaGraphInstanceDeActivateRequest', + 'MediaGraphInstanceDeleteRequest', + 'MediaGraphInstanceGetRequest', + 'MediaGraphInstanceListRequest', + 'MediaGraphInstanceProperties', + 'MediaGraphInstanceSetRequest', + 'MediaGraphInstanceSetRequestBody', + 'MediaGraphIoTHubMessageSink', + 'MediaGraphIoTHubMessageSource', + 'MediaGraphMotionDetectionProcessor', + 'MediaGraphNodeInput', + 'MediaGraphOutputSelector', + 'MediaGraphParameterDeclaration', + 'MediaGraphParameterDefinition', + 'MediaGraphPemCertificateList', + 'MediaGraphProcessor', + 'MediaGraphRtspSource', + 'MediaGraphSignalGateProcessor', + 'MediaGraphSink', + 'MediaGraphSource', + 'MediaGraphSystemData', + 'MediaGraphTlsEndpoint', + 'MediaGraphTlsValidationOptions', + 'MediaGraphTopology', + 'MediaGraphTopologyCollection', + 'MediaGraphTopologyDeleteRequest', + 'MediaGraphTopologyGetRequest', + 'MediaGraphTopologyListRequest', + 'MediaGraphTopologyProperties', + 'MediaGraphTopologySetRequest', + 'MediaGraphTopologySetRequestBody', + 'MediaGraphUnsecuredEndpoint', + 'MediaGraphUsernamePasswordCredentials', + 'OperationBase', + 'MediaGraphGrpcExtensionDataTransferMode', + 'MediaGraphImageEncodingFormat', + 'MediaGraphImageFormatRawPixelFormat', + 'MediaGraphImageScaleMode', + 'MediaGraphInstanceState', + 'MediaGraphMotionDetectionSensitivity', + 
'MediaGraphOutputSelectorOperator', + 'MediaGraphParameterType', + 'MediaGraphRtspTransport', +] diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py new file mode 100644 index 000000000000..6e78e4728244 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. + """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """How frame data should be transmitted to the inferencing engine. + """ + + EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. 
+ SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. + +class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The different encoding formats that can be used for the image. + """ + + JPEG = "Jpeg" #: JPEG image format. + BMP = "Bmp" #: BMP image format. + PNG = "Png" #: PNG image format. + +class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """pixel format + """ + + YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). + RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. + RGB565_LE = "Rgb565le" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian. + RGB555_BE = "Rgb555be" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined. + RGB555_LE = "Rgb555le" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined. + RGB24 = "Rgb24" #: Packed RGB 8:8:8, 24bpp, RGBRGB. + BGR24 = "Bgr24" #: Packed RGB 8:8:8, 24bpp, BGRBGR. + ARGB = "Argb" #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB. + RGBA = "Rgba" #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA. + ABGR = "Abgr" #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR. + BGRA = "Bgra" #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA. + +class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the modes for scaling an input video frame into an image, before it is sent to an + inference engine. + """ + + PRESERVE_ASPECT_RATIO = "PreserveAspectRatio" #: Use the same aspect ratio as the input frame. + PAD = "Pad" #: Center pad the input frame to match the given dimensions. + STRETCH = "Stretch" #: Stretch input frame to match given dimensions. + +class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Allowed states for a graph Instance. + """ + + INACTIVE = "Inactive" #: Inactive state. + ACTIVATING = "Activating" #: Activating state. 
+ ACTIVE = "Active" #: Active state. + DEACTIVATING = "Deactivating" #: Deactivating state. + +class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumeration that specifies the sensitivity of the motion detection processor. + """ + + LOW = "Low" #: Low Sensitivity. + MEDIUM = "Medium" #: Medium Sensitivity. + HIGH = "High" #: High Sensitivity. + +class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The operator to compare streams by. + """ + + IS_ENUM = "is" #: A media type is the same type or a subtype. + IS_NOT = "isNot" #: A media type is not the same type or a subtype. + +class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """name + """ + + STRING = "String" #: A string parameter value. + SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. + INT = "Int" #: A 32-bit signed integer as parameter value. + DOUBLE = "Double" #: A 64-bit double-precision floating point type as parameter value. + BOOL = "Bool" #: A boolean value that is either true or false. + +class MediaGraphRtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + """ + + HTTP = "Http" #: HTTP/HTTPS transport. This should be used when HTTP tunneling is desired. + TCP = "Tcp" #: TCP transport. This should be used when HTTP tunneling is NOT desired. 
diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py new file mode 100644 index 000000000000..62f58c7ea385 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py @@ -0,0 +1,2008 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import msrest.serialization + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = kwargs['name'] + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. + :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. + :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = kwargs.get('asset_name_pattern', None) + self.segment_length = kwargs.get('segment_length', None) + self.local_media_cache_path = kwargs.get('local_media_cache_path', None) + self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = kwargs.get('endpoint', None) + self.image = kwargs.get('image', None) + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = kwargs.get('credentials', None) + self.url = kwargs['url'] + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. 
+ :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = kwargs['file_path_pattern'] + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. 
+ :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = kwargs.get('maximum_fps', None) + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. 
+ :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'data_transfer': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphGrpcExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str + self.data_transfer = kwargs['data_transfer'] + + +class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): + """Describes how media should be transferred to the inferencing engine. + + All required parameters must be populated in order to send to Azure. + + :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if + mode is SharedMemory. Should not be specified otherwise. + :type shared_memory_size_mi_b: str + :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + values include: "Embedded", "SharedMemory". 
+ :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) + self.mode = kwargs['mode'] + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. + :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = kwargs['header_name'] + self.header_value = kwargs['header_value'] + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. 
+ :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = kwargs.get('scale', None) + self.format = kwargs.get('format', None) + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). 
+ :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = kwargs.get('encoding', "Jpeg") + self.quality = kwargs.get('quality', None) + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = kwargs.get('pixel_format', None) + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. + + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. 
+ :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = kwargs.get('mode', None) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDeactivate' # type: str + + +class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeleteRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDelete' # type: str + + +class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): + """MediaGraphInstanceGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. 
method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. Possible values include: "Inactive", + "Activating", "Active", "Deactivating". 
+ :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + self.topology_name = kwargs.get('topology_name', None) + self.parameters = kwargs.get('parameters', None) + self.state = kwargs.get('state', None) + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. + :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'instance': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceSet' # type: str + self.instance = kwargs['instance'] + + +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): + """MediaGraphInstanceSetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. 
+ + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceSetRequestBody, self).__init__(**kwargs) + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphIoTHubMessageSink(MediaGraphSink): + """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. 
An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param hub_output_name: Name of the output path to which the graph will publish message. These + messages can then be delivered to desired destinations by declaring routes referencing the + output path in the IoT Edge deployment manifest. + :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = kwargs.get('hub_output_name', None) + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. 
+ :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = kwargs.get('hub_input_name', None) + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". + :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. + :type output_motion_region: bool + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, + 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphMotionDetectionProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str + self.sensitivity = kwargs.get('sensitivity', None) + self.output_motion_region = kwargs.get('output_motion_region', None) + + +class MediaGraphNodeInput(msrest.serialization.Model): + """Represents the input to any node in a media graph. + + :param node_name: The name of another node in the media graph, the output of which is used as + input to this node. 
+ :type node_name: str + :param output_selectors: Allows for the selection of particular streams from another node. + :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + """ + + _attribute_map = { + 'node_name': {'key': 'nodeName', 'type': 'str'}, + 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphNodeInput, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.output_selectors = kwargs.get('output_selectors', None) + + +class MediaGraphOutputSelector(msrest.serialization.Model): + """Allows for the selection of particular streams from another node. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar property: The stream property to compare with. Default value: "mediaType". + :vartype property: str + :param operator: The operator to compare streams by. Possible values include: "is", "isNot". + :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :param value: Value to compare against. + :type value: str + """ + + _validation = { + 'property': {'constant': True}, + } + + _attribute_map = { + 'property': {'key': 'property', 'type': 'str'}, + 'operator': {'key': 'operator', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + property = "mediaType" + + def __init__( + self, + **kwargs + ): + super(MediaGraphOutputSelector, self).__init__(**kwargs) + self.operator = kwargs.get('operator', None) + self.value = kwargs.get('value', None) + + +class MediaGraphParameterDeclaration(msrest.serialization.Model): + """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required. The name of the parameter.
+    :type name: str
+    :param type: Required. The type of the parameter. Possible values include: "String", "SecretString", "Int",
+     "Double", "Bool".
+    :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType
+    :param description: Description of the parameter.
+    :type description: str
+    :param default: The default value for the parameter, to be used if the graph instance does not
+     specify a value.
+    :type default: str
+    """
+
+    _validation = {
+        'name': {'required': True, 'max_length': 64, 'min_length': 0},
+        'type': {'required': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+        'description': {'key': 'description', 'type': 'str'},
+        'default': {'key': 'default', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphParameterDeclaration, self).__init__(**kwargs)
+        self.name = kwargs['name']
+        self.type = kwargs['type']
+        self.description = kwargs.get('description', None)
+        self.default = kwargs.get('default', None)
+
+
+class MediaGraphParameterDefinition(msrest.serialization.Model):
+    """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required. Name of parameter as defined in the graph topology.
+    :type name: str
+    :param value: Required. Value of parameter.
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = kwargs['name'] + self.value = kwargs['value'] + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. + :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = kwargs['certificates'] + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = kwargs.get('transport', None) + self.endpoint = kwargs['endpoint'] + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. 
+ :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. + :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) + self.activation_signal_offset = kwargs.get('activation_signal_offset', None) + self.minimum_activation_time = kwargs.get('minimum_activation_time', None) + self.maximum_activation_time = kwargs.get('maximum_activation_time', None) + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). 
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = kwargs.get('created_at', None) + self.last_modified_at = kwargs.get('last_modified_at', None) + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = kwargs.get('trusted_certificates', None) + self.validation_options = kwargs.get('validation_options', None) + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. + :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = kwargs.get('ignore_hostname', None) + self.ignore_signature = kwargs.get('ignore_signature', None) + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. 
+ :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyDeleteRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyDelete' # type: str + + +class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): + """MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyGetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyGet' # type: str + + +class MediaGraphTopologyListRequest(OperationBase): + """MediaGraphTopologyListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyListRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyList' # type: str + + +class MediaGraphTopologyProperties(msrest.serialization.Model): + """Describes the properties of a graph topology. + + :param description: An optional description for the instance. + :type description: str + :param parameters: An optional description for the instance. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] + :param sources: An optional description for the instance. + :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] + :param processors: An optional description for the instance. + :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] + :param sinks: name. + :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, + 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, + 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, + 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyProperties, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + self.parameters = kwargs.get('parameters', None) + self.sources = kwargs.get('sources', None) + self.processors = kwargs.get('processors', None) + self.sinks = kwargs.get('sinks', None) + + +class MediaGraphTopologySetRequest(OperationBase): + """MediaGraphTopologySetRequest. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param graph: Required. Describes a graph topology. + :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'graph': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologySetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologySet' # type: str + self.graph = kwargs['graph'] + + +class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): + """MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties
+ """
+
+ _validation = {
+ 'method_name': {'readonly': True},
+ 'api_version': {'constant': True},
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'method_name': {'key': 'methodName', 'type': 'str'},
+ 'api_version': {'key': '@apiVersion', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'},
+ 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'},
+ }
+
+ api_version = "1.0"
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(MediaGraphTopologySetRequestBody, self).__init__(**kwargs)
+ # The generator emitted this discriminator assignment once per parent
+ # class (OperationBase and MediaGraphTopology); a single assignment
+ # is sufficient and behaviorally identical.
+ self.method_name = 'MediaGraphTopologySetRequestBody' # type: str
+ self.name = kwargs['name']
+ self.system_data = kwargs.get('system_data', None)
+ self.properties = kwargs.get('properties', None)
+
+
+class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint):
+ """An endpoint that the media graph can connect to, with no encryption in transit.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param type: Required. The discriminator for derived types.Constant filled by server.
+ :type type: str
+ :param credentials: Polymorphic credentials to be presented to the endpoint.
+ :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials
+ :param url: Required. Url for the endpoint.
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. + :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = kwargs['username'] + self.password = kwargs.get('password', None) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py new file mode 100644 index 000000000000..5de3adde8e11 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py @@ -0,0 +1,2185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) 
Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import List, Optional, Union + +import msrest.serialization + +from ._definitionsfor_live_video_analyticson_io_tedge_enums import * + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". 
+ :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = name + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. + :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. + :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + asset_name_pattern: Optional[str] = None, + segment_length: Optional[datetime.timedelta] = None, + local_media_cache_path: Optional[str] = None, + local_media_cache_maximum_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = asset_name_pattern + self.segment_length = segment_length + self.local_media_cache_path = local_media_cache_path + self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = endpoint + self.image = image + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. 
+ :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. 
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = credentials + self.url = url + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. 
+ :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + file_path_pattern: str, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = file_path_pattern + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. 
+ :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + maximum_fps: Optional[str] = None, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = maximum_fps + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. 
+ :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'data_transfer': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + data_transfer: "MediaGraphGrpcExtensionDataTransfer", + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str + self.data_transfer = data_transfer + + +class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): + """Describes how media should be transferred to the inferencing engine. + + All required parameters must be populated in order to send to Azure. + + :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if + mode is SharedMemory. Should not be specificed otherwise. + :type shared_memory_size_mi_b: str + :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + values include: "Embedded", "SharedMemory". 
+ :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Union[str, "MediaGraphGrpcExtensionDataTransferMode"], + shared_memory_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = shared_memory_size_mi_b + self.mode = mode + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. 
+ :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + *, + header_name: str, + header_value: str, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = header_name + self.header_value = header_value + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. + :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + *, + scale: Optional["MediaGraphImageScale"] = None, + format: Optional["MediaGraphImageFormat"] = None, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = scale + self.format = format + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). + :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + *, + encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", + quality: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = encoding + self.quality = quality + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + *, + pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = pixel_format + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. + + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. + :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, + width: Optional[str] = None, + height: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = mode + self.width = width + self.height = height + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. 
+ :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphInstanceProperties"] = None, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphInstance"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+    :type name: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        **kwargs
+    ):
+        super(MediaGraphInstanceDeActivateRequest, self).__init__(name=name, **kwargs)
+        self.method_name = 'GraphInstanceDeactivate'  # type: str
+
+
+class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase):
+    """Request to delete a Media Graph instance by name.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name. Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+    :vartype api_version: str
+    :param name: Required. The name of the graph instance to delete.
+    :type name: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        **kwargs
+    ):
+        # 'name' is forwarded to the base request, which carries the target
+        # graph-instance name on the wire (attribute map key 'name').
+        super(MediaGraphInstanceDeleteRequest, self).__init__(name=name, **kwargs)
+        self.method_name = 'GraphInstanceDelete'  # type: str
+
+
+class MediaGraphInstanceGetRequest(ItemNonSetRequestBase):
+    """MediaGraphInstanceGetRequest.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name.Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+ :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. 
Possible values include: "Inactive", + "Activating", "Active", "Deactivating". + :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + topology_name: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDefinition"]] = None, + state: Optional[Union[str, "MediaGraphInstanceState"]] = None, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = description + self.topology_name = topology_name + self.parameters = parameters + self.state = state + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. 
+    :type instance: ~azure.media.lva.edge.models.MediaGraphInstance
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'instance': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'instance': {'key': 'instance', 'type': 'MediaGraphInstance'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        instance: "MediaGraphInstance",
+        **kwargs
+    ):
+        super(MediaGraphInstanceSetRequest, self).__init__(**kwargs)
+        self.method_name = 'GraphInstanceSet'  # type: str
+        self.instance = instance
+
+
+class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase):
+    """Body of a GraphInstanceSet request: a Media Graph instance combined with the operation envelope.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name. Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+    :vartype api_version: str
+    :param name: Required. name.
+    :type name: str
+    :param system_data: Graph system data.
+    :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData
+    :param properties: Properties of a Media Graph instance.
+    :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'},
+        'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        system_data: Optional["MediaGraphSystemData"] = None,
+        properties: Optional["MediaGraphInstanceProperties"] = None,
+        **kwargs
+    ):
+        super(MediaGraphInstanceSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs)
+        # NOTE(review): the generator emitted this assignment twice; deduplicated to a
+        # single assignment. The explicit re-assignments below repeat what super()
+        # already set — kept, as that is the codegen's multiple-inheritance convention.
+        self.method_name = 'MediaGraphInstanceSetRequestBody'  # type: str
+        self.name = name
+        self.system_data = system_data
+        self.properties = properties
+
+
+class MediaGraphIoTHubMessageSink(MediaGraphSink):
+    """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param type: Required. The discriminator for derived types.Constant filled by server.
+    :type type: str
+    :param name: Required. Name to be used for the media graph sink.
+    :type name: str
+    :param inputs: Required. An array of the names of the other nodes in the media graph, the
+     outputs of which are used as input for this sink node.
+    :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput]
+    :param hub_output_name: Name of the output path to which the graph will publish message. These
+     messages can then be delivered to desired destinations by declaring routes referencing the
+     output path in the IoT Edge deployment manifest.
+ :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + hub_output_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = hub_output_name + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + hub_input_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = hub_input_name + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". 
+ :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. + :type output_motion_region: bool + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, + 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, + output_motion_region: Optional[bool] = None, + **kwargs + ): + super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str + self.sensitivity = sensitivity + self.output_motion_region = output_motion_region + + +class MediaGraphNodeInput(msrest.serialization.Model): + """Represents the input to any node in a media graph. + + :param node_name: The name of another node in the media graph, the output of which is used as + input to this node. + :type node_name: str + :param output_selectors: Allows for the selection of particular streams from another node. 
+ :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + """ + + _attribute_map = { + 'node_name': {'key': 'nodeName', 'type': 'str'}, + 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, + } + + def __init__( + self, + *, + node_name: Optional[str] = None, + output_selectors: Optional[List["MediaGraphOutputSelector"]] = None, + **kwargs + ): + super(MediaGraphNodeInput, self).__init__(**kwargs) + self.node_name = node_name + self.output_selectors = output_selectors + + +class MediaGraphOutputSelector(msrest.serialization.Model): + """Allows for the selection of particular streams from another node. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar property: The stream property to compare with. Default value: "mediaType". + :vartype property: str + :param operator: The operator to compare streams by. Possible values include: "is", "isNot". + :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :param value: Value to compare against. + :type value: str + """ + + _validation = { + 'property': {'constant': True}, + } + + _attribute_map = { + 'property': {'key': 'property', 'type': 'str'}, + 'operator': {'key': 'operator', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + property = "mediaType" + + def __init__( + self, + *, + operator: Optional[Union[str, "MediaGraphOutputSelectorOperator"]] = None, + value: Optional[str] = None, + **kwargs + ): + super(MediaGraphOutputSelector, self).__init__(**kwargs) + self.operator = operator + self.value = value + + +class MediaGraphParameterDeclaration(msrest.serialization.Model): + """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the parameter. + :type name: str + :param type: Required. name. Possible values include: "String", "SecretString", "Int", + "Double", "Bool". + :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :param description: Description of the parameter. + :type description: str + :param default: The default value for the parameter, to be used if the graph instance does not + specify a value. + :type default: str + """ + + _validation = { + 'name': {'required': True, 'max_length': 64, 'min_length': 0}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'default': {'key': 'default', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "MediaGraphParameterType"], + description: Optional[str] = None, + default: Optional[str] = None, + **kwargs + ): + super(MediaGraphParameterDeclaration, self).__init__(**kwargs) + self.name = name + self.type = type + self.description = description + self.default = default + + +class MediaGraphParameterDefinition(msrest.serialization.Model): + """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Name of parameter as defined in the graph topology. + :type name: str + :param value: Required. Value of parameter. 
+ :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + value: str, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = name + self.value = value + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. + :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + *, + certificates: List[str], + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = certificates + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + *, + name: str, + endpoint: "MediaGraphEndpoint", + transport: Optional[Union[str, "MediaGraphRtspTransport"]] = None, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = transport + self.endpoint = endpoint + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. 
+ :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. + :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + activation_evaluation_window: Optional[str] = None, + activation_signal_offset: Optional[str] = None, + minimum_activation_time: Optional[str] = None, + maximum_activation_time: Optional[str] = None, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = activation_evaluation_window + self.activation_signal_offset = activation_signal_offset + self.minimum_activation_time = minimum_activation_time + self.maximum_activation_time = maximum_activation_time + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). 
+ :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + *, + created_at: Optional[datetime.datetime] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = created_at + self.last_modified_at = last_modified_at + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + trusted_certificates: Optional["MediaGraphCertificateSource"] = None, + validation_options: Optional["MediaGraphTlsValidationOptions"] = None, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = trusted_certificates + self.validation_options = validation_options + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. 
+ :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + *, + ignore_hostname: Optional[str] = None, + ignore_signature: Optional[str] = None, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = ignore_hostname + self.ignore_signature = ignore_signature + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). 
+ :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphTopology"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphTopologyDeleteRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphTopologyDelete' # type: str + + +class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): + """MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphTopologyGetRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphTopologyGet' # type: str + + +class MediaGraphTopologyListRequest(OperationBase): + """MediaGraphTopologyListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyListRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyList' # type: str + + +class MediaGraphTopologyProperties(msrest.serialization.Model): + """Describes the properties of a graph topology. + + :param description: An optional description for the instance. + :type description: str + :param parameters: An optional description for the instance. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] + :param sources: An optional description for the instance. + :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] + :param processors: An optional description for the instance. + :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] + :param sinks: name. 
+ :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, + 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, + 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, + 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDeclaration"]] = None, + sources: Optional[List["MediaGraphSource"]] = None, + processors: Optional[List["MediaGraphProcessor"]] = None, + sinks: Optional[List["MediaGraphSink"]] = None, + **kwargs + ): + super(MediaGraphTopologyProperties, self).__init__(**kwargs) + self.description = description + self.parameters = parameters + self.sources = sources + self.processors = processors + self.sinks = sinks + + +class MediaGraphTopologySetRequest(OperationBase): + """MediaGraphTopologySetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param graph: Required. Describes a graph topology. 
+ :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'graph': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + graph: "MediaGraphTopology", + **kwargs + ): + super(MediaGraphTopologySetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologySet' # type: str + self.graph = graph + + +class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): + """MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): + """An endpoint that the media graph can connect to, with no encryption in transit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. 
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. 
+ :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + *, + username: str, + password: Optional[str] = None, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = username + self.password = password diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt similarity index 100% rename from sdk/media/azure-media-lva-edge/dev_requirements.txt rename to sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt diff --git a/sdk/media/azure-media-lva-edge/docs/DevTips.md b/sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md similarity index 80% rename from sdk/media/azure-media-lva-edge/docs/DevTips.md rename to sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md index b649d500d873..aee95a990e07 100644 --- a/sdk/media/azure-media-lva-edge/docs/DevTips.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md @@ -18,11 +18,11 @@ tox -c eng/tox/tox.ini ``` To run a specific tox command from your directory use the following commands: ```bash -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sphinx 
-azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e lint -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e mypy -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e whl -azure-sdk-for-python\sdk\api-learn\azure-learnappconfig> tox -c ../../../eng/tox/tox.ini -e sdist +> tox -c ../../../eng/tox/tox.ini -e sphinx +> tox -c ../../../eng/tox/tox.ini -e lint +> tox -c ../../../eng/tox/tox.ini -e mypy +> tox -c ../../../eng/tox/tox.ini -e whl +> tox -c ../../../eng/tox/tox.ini -e sdist ``` A quick description of the five commands above: * sphinx: documentation generation using the inline comments written in our code diff --git a/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py similarity index 100% rename from sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py rename to sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py diff --git a/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py similarity index 100% rename from sdk/media/azure-media-lva-edge/samples/sample_hello_world.py rename to sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py similarity index 100% rename from sdk/media/azure-media-lva-edge/samples/sample_lva.py rename to sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py diff --git a/sdk/media/azure-media-lva-edge/sdk_packaging.toml b/sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml similarity index 100% rename from sdk/media/azure-media-lva-edge/sdk_packaging.toml rename to 
sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml diff --git a/sdk/media/azure-media-lva-edge/setup.cfg b/sdk/media/azure-media-livevideoanalytics-edge/setup.cfg similarity index 100% rename from sdk/media/azure-media-lva-edge/setup.cfg rename to sdk/media/azure-media-livevideoanalytics-edge/setup.cfg diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-livevideoanalytics-edge/setup.py similarity index 98% rename from sdk/media/azure-media-lva-edge/setup.py rename to sdk/media/azure-media-livevideoanalytics-edge/setup.py index a4bfc61f9c6f..324e31db3312 100644 --- a/sdk/media/azure-media-lva-edge/setup.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/setup.py @@ -13,7 +13,7 @@ from setuptools import find_packages, setup # Change the PACKAGE_NAME only to change folder and different name -PACKAGE_NAME = "azure-media-lva-edge" +PACKAGE_NAME = "azure-media-livevideoanalytics-edge" PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" # a-b-c => a/b/c diff --git a/sdk/media/azure-media-lva-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md similarity index 78% rename from sdk/media/azure-media-lva-edge/swagger/autorest.md rename to sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index 48618fb331ed..d318650fa662 100644 --- a/sdk/media/azure-media-lva-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -10,7 +10,7 @@ autorest --v3 --python ## Settings ```yaml -require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/lva/edge/_generated namespace: azure.media.lva.edge no-namespace-folders: true diff --git a/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt b/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt similarity index 100% rename 
from sdk/media/azure-media-lva-edge/swagger/commandOutput.txt rename to sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py b/sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py similarity index 100% rename from sdk/media/azure-media-lva-edge/tests/conftest.py rename to sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py similarity index 100% rename from sdk/media/azure-media-lva-edge/tests/test_app_config.py rename to sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py From 5b9149f672f5920520d754ce5ab6a34059898b95 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 09:02:47 -0800 Subject: [PATCH 46/64] fixing broken link --- sdk/media/azure-media-livevideoanalytics-edge/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index d2467be44b9e..5e665397682c 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -121,7 +121,7 @@ additional questions or comments. 
[coc_contact]: mailto:opencode@microsoft.com [package]: TODO://link-to-published-package -[source]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-media-lva-edge +[source]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media [samples]: https://github.com/Azure-Samples/live-video-analytics-iot-edge-python [doc_direct_methods]: https://docs.microsoft.com/azure/media-services/live-video-analytics-edge/direct-methods From 656f4450a24a73252971b71c2f1c87b645049187 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 09:15:35 -0800 Subject: [PATCH 47/64] missed one namespace update' --- .../edge/_generated/models/_models.py | 123 +- .../edge/_generated/models/_models_py3.py | 123 +- .../media/lva/edge/_generated/__init__.py | 1 - .../media/lva/edge/_generated/_version.py | 9 - .../lva/edge/_generated/models/__init__.py | 199 -- ...r_live_video_analyticson_io_tedge_enums.py | 108 - .../lva/edge/_generated/models/_models.py | 2008 --------------- .../lva/edge/_generated/models/_models_py3.py | 2185 ----------------- .../azure/media/lva/edge/_generated/py.typed | 1 - .../swagger/autorest.md | 4 +- 10 files changed, 136 insertions(+), 4625 deletions(-) delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py delete mode 100644 
sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index 62f58c7ea385..b0cb8248aec0 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -106,7 +106,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -146,7 +146,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param asset_name_pattern: A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of @@ -236,7 +236,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -279,11 +279,11 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -325,11 +325,11 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -397,7 +397,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -438,7 +438,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param file_path_pattern: Required. Absolute file path pattern for creating new files on the Edge device. :type file_path_pattern: str @@ -478,7 +478,7 @@ class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not exceed this limit. :type maximum_fps: str @@ -517,13 +517,14 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage :param data_transfer: Required. How media should be transferred to the inferencing engine. - :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer """ _validation = { @@ -561,7 +562,8 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -593,11 +595,11 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -661,9 +663,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat """ _attribute_map = { @@ -721,7 +723,8 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): :type type: str :param encoding: The different encoding formats that can be used for the image. Possible values include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :type encoding: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best quality). :type quality: str @@ -756,7 +759,8 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :type type: str :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". 
- :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + :type pixel_format: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -782,7 +786,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -813,9 +817,9 @@ class MediaGraphInstance(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -879,7 +883,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """Collection of graph instances. :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response). 
@@ -1051,10 +1055,11 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph Instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1087,7 +1092,7 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param instance: Required. Represents a Media Graph instance. - :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ _validation = { @@ -1127,9 +1132,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -1171,7 +1176,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param hub_output_name: Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1284,10 +1289,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :type sensitivity: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool @@ -1324,7 +1330,8 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + :type output_selectors: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1349,7 +1356,8 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". 
:vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1384,7 +1392,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. name. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str :param default: The default value for the parameter, to be used if the graph instance does not @@ -1487,9 +1495,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport :param endpoint: Required. RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint """ _validation = { @@ -1526,7 +1534,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events, before evaluating them. :type activation_evaluation_window: str @@ -1601,15 +1609,17 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :type trusted_certificates: + ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. - :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + :type validation_options: + ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1667,9 +1677,9 @@ class MediaGraphTopology(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -1696,7 +1706,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """Collection of graph topologies. :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response). @@ -1828,13 +1838,14 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :param description: An optional description for the instance. :type description: str :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] + :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ _attribute_map = { @@ -1869,7 +1880,7 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param graph: Required. Describes a graph topology. 
- :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ _validation = { @@ -1909,9 +1920,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -1950,7 +1961,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index 5de3adde8e11..a71214b4003f 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -113,7 +113,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -156,7 +156,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param asset_name_pattern: A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of @@ -253,7 +253,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] """ _validation = { @@ -299,11 +299,11 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -350,11 +350,11 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -427,7 +427,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -471,7 +471,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param file_path_pattern: Required. 
Absolute file path pattern for creating new files on the Edge device. :type file_path_pattern: str @@ -515,7 +515,7 @@ class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not exceed this limit. :type maximum_fps: str @@ -558,13 +558,14 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage :param data_transfer: Required. How media should be transferred to the inferencing engine. - :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer """ _validation = { @@ -608,7 +609,8 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. 
How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -643,11 +645,11 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage + :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage """ _validation = { @@ -719,9 +721,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat """ _attribute_map = { @@ -782,7 +784,8 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): :type type: str :param encoding: The different encoding formats that can be used for the image. Possible values include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". 
- :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :type encoding: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best quality). :type quality: str @@ -820,7 +823,8 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :type type: str :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + :type pixel_format: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -848,7 +852,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -883,9 +887,9 @@ class MediaGraphInstance(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -955,7 +959,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """Collection of graph instances. :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response). @@ -1136,10 +1140,11 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph Instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1177,7 +1182,7 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param instance: Required. Represents a Media Graph instance. - :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ _validation = { @@ -1219,9 +1224,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :param name: Required. name. 
:type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ _validation = { @@ -1267,7 +1272,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param hub_output_name: Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1389,10 +1394,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :type sensitivity: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. 
Default is true. :type output_motion_region: bool @@ -1434,7 +1440,8 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + :type output_selectors: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1462,7 +1469,8 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". :vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or + ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1500,7 +1508,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. name. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str :param default: The default value for the parameter, to be used if the graph instance does not @@ -1613,9 +1621,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport :param endpoint: Required. 
RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint """ _validation = { @@ -1656,7 +1664,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events, before evaluating them. :type activation_evaluation_window: str @@ -1741,15 +1749,17 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :type trusted_certificates: + ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. 
- :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + :type validation_options: + ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1815,9 +1825,9 @@ class MediaGraphTopology(msrest.serialization.Model): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -1848,7 +1858,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """Collection of graph topologies. :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] :param continuation_token: Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response). @@ -1987,13 +1997,14 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :param description: An optional description for the instance. :type description: str :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] + :type parameters: + list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] :param sources: An optional description for the instance. 
- :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] + :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ _attribute_map = { @@ -2034,7 +2045,7 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar api_version: api version. Default value: "1.0". :vartype api_version: str :param graph: Required. Describes a graph topology. - :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ _validation = { @@ -2076,9 +2087,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :param name: Required. name. :type name: str :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ _validation = { @@ -2121,7 +2132,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. 
:type url: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py deleted file mode 100644 index 5960c353a898..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py deleted file mode 100644 index 31ed98425268..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/_version.py +++ /dev/null @@ -1,9 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -VERSION = "1.0" diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py deleted file mode 100644 index 2e389ab8ef9d..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/__init__.py +++ /dev/null @@ -1,199 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -try: - from ._models_py3 import ItemNonSetRequestBase - from ._models_py3 import MediaGraphAssetSink - from ._models_py3 import MediaGraphCertificateSource - from ._models_py3 import MediaGraphCognitiveServicesVisionExtension - from ._models_py3 import MediaGraphCredentials - from ._models_py3 import MediaGraphEndpoint - from ._models_py3 import MediaGraphExtensionProcessorBase - from ._models_py3 import MediaGraphFileSink - from ._models_py3 import MediaGraphFrameRateFilterProcessor - from ._models_py3 import MediaGraphGrpcExtension - from ._models_py3 import MediaGraphGrpcExtensionDataTransfer - from ._models_py3 import MediaGraphHttpExtension - from ._models_py3 import MediaGraphHttpHeaderCredentials - from ._models_py3 import MediaGraphImage - from ._models_py3 import MediaGraphImageFormat - from ._models_py3 import MediaGraphImageFormatEncoded - from ._models_py3 import MediaGraphImageFormatRaw - from ._models_py3 import MediaGraphImageScale - from ._models_py3 import MediaGraphInstance - from ._models_py3 import MediaGraphInstanceActivateRequest - from ._models_py3 import MediaGraphInstanceCollection - from ._models_py3 import MediaGraphInstanceDeActivateRequest - from ._models_py3 import MediaGraphInstanceDeleteRequest - from ._models_py3 import MediaGraphInstanceGetRequest - from ._models_py3 import MediaGraphInstanceListRequest - from ._models_py3 import MediaGraphInstanceProperties - from ._models_py3 import MediaGraphInstanceSetRequest - from ._models_py3 import MediaGraphInstanceSetRequestBody - from ._models_py3 import MediaGraphIoTHubMessageSink - from ._models_py3 import MediaGraphIoTHubMessageSource - from ._models_py3 import MediaGraphMotionDetectionProcessor - from 
._models_py3 import MediaGraphNodeInput - from ._models_py3 import MediaGraphOutputSelector - from ._models_py3 import MediaGraphParameterDeclaration - from ._models_py3 import MediaGraphParameterDefinition - from ._models_py3 import MediaGraphPemCertificateList - from ._models_py3 import MediaGraphProcessor - from ._models_py3 import MediaGraphRtspSource - from ._models_py3 import MediaGraphSignalGateProcessor - from ._models_py3 import MediaGraphSink - from ._models_py3 import MediaGraphSource - from ._models_py3 import MediaGraphSystemData - from ._models_py3 import MediaGraphTlsEndpoint - from ._models_py3 import MediaGraphTlsValidationOptions - from ._models_py3 import MediaGraphTopology - from ._models_py3 import MediaGraphTopologyCollection - from ._models_py3 import MediaGraphTopologyDeleteRequest - from ._models_py3 import MediaGraphTopologyGetRequest - from ._models_py3 import MediaGraphTopologyListRequest - from ._models_py3 import MediaGraphTopologyProperties - from ._models_py3 import MediaGraphTopologySetRequest - from ._models_py3 import MediaGraphTopologySetRequestBody - from ._models_py3 import MediaGraphUnsecuredEndpoint - from ._models_py3 import MediaGraphUsernamePasswordCredentials - from ._models_py3 import OperationBase -except (SyntaxError, ImportError): - from ._models import ItemNonSetRequestBase # type: ignore - from ._models import MediaGraphAssetSink # type: ignore - from ._models import MediaGraphCertificateSource # type: ignore - from ._models import MediaGraphCognitiveServicesVisionExtension # type: ignore - from ._models import MediaGraphCredentials # type: ignore - from ._models import MediaGraphEndpoint # type: ignore - from ._models import MediaGraphExtensionProcessorBase # type: ignore - from ._models import MediaGraphFileSink # type: ignore - from ._models import MediaGraphFrameRateFilterProcessor # type: ignore - from ._models import MediaGraphGrpcExtension # type: ignore - from ._models import 
MediaGraphGrpcExtensionDataTransfer # type: ignore - from ._models import MediaGraphHttpExtension # type: ignore - from ._models import MediaGraphHttpHeaderCredentials # type: ignore - from ._models import MediaGraphImage # type: ignore - from ._models import MediaGraphImageFormat # type: ignore - from ._models import MediaGraphImageFormatEncoded # type: ignore - from ._models import MediaGraphImageFormatRaw # type: ignore - from ._models import MediaGraphImageScale # type: ignore - from ._models import MediaGraphInstance # type: ignore - from ._models import MediaGraphInstanceActivateRequest # type: ignore - from ._models import MediaGraphInstanceCollection # type: ignore - from ._models import MediaGraphInstanceDeActivateRequest # type: ignore - from ._models import MediaGraphInstanceDeleteRequest # type: ignore - from ._models import MediaGraphInstanceGetRequest # type: ignore - from ._models import MediaGraphInstanceListRequest # type: ignore - from ._models import MediaGraphInstanceProperties # type: ignore - from ._models import MediaGraphInstanceSetRequest # type: ignore - from ._models import MediaGraphInstanceSetRequestBody # type: ignore - from ._models import MediaGraphIoTHubMessageSink # type: ignore - from ._models import MediaGraphIoTHubMessageSource # type: ignore - from ._models import MediaGraphMotionDetectionProcessor # type: ignore - from ._models import MediaGraphNodeInput # type: ignore - from ._models import MediaGraphOutputSelector # type: ignore - from ._models import MediaGraphParameterDeclaration # type: ignore - from ._models import MediaGraphParameterDefinition # type: ignore - from ._models import MediaGraphPemCertificateList # type: ignore - from ._models import MediaGraphProcessor # type: ignore - from ._models import MediaGraphRtspSource # type: ignore - from ._models import MediaGraphSignalGateProcessor # type: ignore - from ._models import MediaGraphSink # type: ignore - from ._models import MediaGraphSource # type: ignore - from 
._models import MediaGraphSystemData # type: ignore - from ._models import MediaGraphTlsEndpoint # type: ignore - from ._models import MediaGraphTlsValidationOptions # type: ignore - from ._models import MediaGraphTopology # type: ignore - from ._models import MediaGraphTopologyCollection # type: ignore - from ._models import MediaGraphTopologyDeleteRequest # type: ignore - from ._models import MediaGraphTopologyGetRequest # type: ignore - from ._models import MediaGraphTopologyListRequest # type: ignore - from ._models import MediaGraphTopologyProperties # type: ignore - from ._models import MediaGraphTopologySetRequest # type: ignore - from ._models import MediaGraphTopologySetRequestBody # type: ignore - from ._models import MediaGraphUnsecuredEndpoint # type: ignore - from ._models import MediaGraphUsernamePasswordCredentials # type: ignore - from ._models import OperationBase # type: ignore - -from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( - MediaGraphGrpcExtensionDataTransferMode, - MediaGraphImageEncodingFormat, - MediaGraphImageFormatRawPixelFormat, - MediaGraphImageScaleMode, - MediaGraphInstanceState, - MediaGraphMotionDetectionSensitivity, - MediaGraphOutputSelectorOperator, - MediaGraphParameterType, - MediaGraphRtspTransport, -) - -__all__ = [ - 'ItemNonSetRequestBase', - 'MediaGraphAssetSink', - 'MediaGraphCertificateSource', - 'MediaGraphCognitiveServicesVisionExtension', - 'MediaGraphCredentials', - 'MediaGraphEndpoint', - 'MediaGraphExtensionProcessorBase', - 'MediaGraphFileSink', - 'MediaGraphFrameRateFilterProcessor', - 'MediaGraphGrpcExtension', - 'MediaGraphGrpcExtensionDataTransfer', - 'MediaGraphHttpExtension', - 'MediaGraphHttpHeaderCredentials', - 'MediaGraphImage', - 'MediaGraphImageFormat', - 'MediaGraphImageFormatEncoded', - 'MediaGraphImageFormatRaw', - 'MediaGraphImageScale', - 'MediaGraphInstance', - 'MediaGraphInstanceActivateRequest', - 'MediaGraphInstanceCollection', - 'MediaGraphInstanceDeActivateRequest', - 
'MediaGraphInstanceDeleteRequest', - 'MediaGraphInstanceGetRequest', - 'MediaGraphInstanceListRequest', - 'MediaGraphInstanceProperties', - 'MediaGraphInstanceSetRequest', - 'MediaGraphInstanceSetRequestBody', - 'MediaGraphIoTHubMessageSink', - 'MediaGraphIoTHubMessageSource', - 'MediaGraphMotionDetectionProcessor', - 'MediaGraphNodeInput', - 'MediaGraphOutputSelector', - 'MediaGraphParameterDeclaration', - 'MediaGraphParameterDefinition', - 'MediaGraphPemCertificateList', - 'MediaGraphProcessor', - 'MediaGraphRtspSource', - 'MediaGraphSignalGateProcessor', - 'MediaGraphSink', - 'MediaGraphSource', - 'MediaGraphSystemData', - 'MediaGraphTlsEndpoint', - 'MediaGraphTlsValidationOptions', - 'MediaGraphTopology', - 'MediaGraphTopologyCollection', - 'MediaGraphTopologyDeleteRequest', - 'MediaGraphTopologyGetRequest', - 'MediaGraphTopologyListRequest', - 'MediaGraphTopologyProperties', - 'MediaGraphTopologySetRequest', - 'MediaGraphTopologySetRequestBody', - 'MediaGraphUnsecuredEndpoint', - 'MediaGraphUsernamePasswordCredentials', - 'OperationBase', - 'MediaGraphGrpcExtensionDataTransferMode', - 'MediaGraphImageEncodingFormat', - 'MediaGraphImageFormatRawPixelFormat', - 'MediaGraphImageScaleMode', - 'MediaGraphInstanceState', - 'MediaGraphMotionDetectionSensitivity', - 'MediaGraphOutputSelectorOperator', - 'MediaGraphParameterType', - 'MediaGraphRtspTransport', -] diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py deleted file mode 100644 index 6e78e4728244..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding=utf-8 -# 
-------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. - """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """How frame data should be transmitted to the inferencing engine. - """ - - EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. - SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. - -class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The different encoding formats that can be used for the image. - """ - - JPEG = "Jpeg" #: JPEG image format. - BMP = "Bmp" #: BMP image format. - PNG = "Png" #: PNG image format. - -class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """pixel format - """ - - YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). - RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. 
- RGB565_LE = "Rgb565le" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian. - RGB555_BE = "Rgb555be" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined. - RGB555_LE = "Rgb555le" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined. - RGB24 = "Rgb24" #: Packed RGB 8:8:8, 24bpp, RGBRGB. - BGR24 = "Bgr24" #: Packed RGB 8:8:8, 24bpp, BGRBGR. - ARGB = "Argb" #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB. - RGBA = "Rgba" #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA. - ABGR = "Abgr" #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR. - BGRA = "Bgra" #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA. - -class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the modes for scaling an input video frame into an image, before it is sent to an - inference engine. - """ - - PRESERVE_ASPECT_RATIO = "PreserveAspectRatio" #: Use the same aspect ratio as the input frame. - PAD = "Pad" #: Center pad the input frame to match the given dimensions. - STRETCH = "Stretch" #: Stretch input frame to match given dimensions. - -class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Allowed states for a graph Instance. - """ - - INACTIVE = "Inactive" #: Inactive state. - ACTIVATING = "Activating" #: Activating state. - ACTIVE = "Active" #: Active state. - DEACTIVATING = "Deactivating" #: Deactivating state. - -class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumeration that specifies the sensitivity of the motion detection processor. - """ - - LOW = "Low" #: Low Sensitivity. - MEDIUM = "Medium" #: Medium Sensitivity. - HIGH = "High" #: High Sensitivity. - -class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The operator to compare streams by. - """ - - IS_ENUM = "is" #: A media type is the same type or a subtype. 
- IS_NOT = "isNot" #: A media type is not the same type or a subtype. - -class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """name - """ - - STRING = "String" #: A string parameter value. - SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. - INT = "Int" #: A 32-bit signed integer as parameter value. - DOUBLE = "Double" #: A 64-bit double-precision floating point type as parameter value. - BOOL = "Bool" #: A boolean value that is either true or false. - -class MediaGraphRtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - """ - - HTTP = "Http" #: HTTP/HTTPS transport. This should be used when HTTP tunneling is desired. - TCP = "Tcp" #: TCP transport. This should be used when HTTP tunneling is NOT desired. diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py deleted file mode 100644 index 62f58c7ea385..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models.py +++ /dev/null @@ -1,2008 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import msrest.serialization - - -class OperationBase(msrest.serialization.Model): - """OperationBase. - - You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(OperationBase, self).__init__(**kwargs) - self.method_name = None # type: Optional[str] - - -class ItemNonSetRequestBase(OperationBase): - """ItemNonSetRequestBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. 
- :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(ItemNonSetRequestBase, self).__init__(**kwargs) - self.method_name = 'ItemNonSetRequestBase' # type: str - self.name = kwargs['name'] - - -class MediaGraphSink(msrest.serialization.Model): - """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSink, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - self.inputs = kwargs['inputs'] - - -class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. - :type asset_name_pattern: str - :param segment_length: When writing media to an asset, wait until at least this duration of - media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum - of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from - Azure. - :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. - :type local_media_cache_maximum_size_mi_b: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, - 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, - 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphAssetSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = kwargs.get('asset_name_pattern', None) - self.segment_length = kwargs.get('segment_length', None) - self.local_media_cache_path = kwargs.get('local_media_cache_path', None) - self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) - - -class MediaGraphCertificateSource(msrest.serialization.Model): - """Base class for certificate sources. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphPemCertificateList. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCertificateSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphProcessor(msrest.serialization.Model): - """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphProcessor, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - self.inputs = kwargs['inputs'] - - -class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = kwargs.get('endpoint', None) - self.image = kwargs.get('image', None) - - -class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str - - -class MediaGraphCredentials(msrest.serialization.Model): - """Credentials to present during authentication. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCredentials, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphEndpoint(msrest.serialization.Model): - """Base class for endpoints. - - You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphEndpoint, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.credentials = kwargs.get('credentials', None) - self.url = kwargs['url'] - - -class MediaGraphFileSink(MediaGraphSink): - """Enables a media graph to write/store media (video and audio) to a file on the Edge device. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. 
- :type file_path_pattern: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFileSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = kwargs['file_path_pattern'] - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. 
- :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = kwargs.get('maximum_fps', None) - - -class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - :param data_transfer: Required. How media should be transferred to the inferencing engine. 
- :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'data_transfer': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphGrpcExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str - self.data_transfer = kwargs['data_transfer'] - - -class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. - - All required parameters must be populated in order to send to Azure. - - :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specified otherwise. - :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible - values include: "Embedded", "SharedMemory".
- :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) - self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) - self.mode = kwargs['mode'] - - -class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphHttpExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str - - -class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): - """Http header service credentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. - :type header_value: str - """ - - _validation = { - 'type': {'required': True}, - 'header_name': {'required': True}, - 'header_value': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'header_name': {'key': 'headerName', 'type': 'str'}, - 'header_value': {'key': 'headerValue', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str - self.header_name = kwargs['header_name'] - self.header_value = kwargs['header_value'] - - -class MediaGraphImage(msrest.serialization.Model): - """Describes the properties of an image frame. - - :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale - :param format: Encoding settings for an image. 
- :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat - """ - - _attribute_map = { - 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, - 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImage, self).__init__(**kwargs) - self.scale = kwargs.get('scale', None) - self.format = kwargs.get('format', None) - - -class MediaGraphImageFormat(msrest.serialization.Model): - """Encoding settings for an image. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormat, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). 
- :type quality: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = kwargs.get('encoding', "Jpeg") - self.quality = kwargs.get('quality', None) - - -class MediaGraphImageFormatRaw(MediaGraphImageFormat): - """Encoding settings for raw images. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormatRaw, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = kwargs.get('pixel_format', None) - - -class MediaGraphImageScale(msrest.serialization.Model): - """The scaling mode for the image. - - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode - :param width: The desired output width of the image. - :type width: str - :param height: The desired output height of the image. 
- :type height: str - """ - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = kwargs.get('mode', None) - self.width = kwargs.get('width', None) - self.height = kwargs.get('height', None) - - -class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstance, self).__init__(**kwargs) - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceActivateRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceActivate' # type: str - - -class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. - - :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). - :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceCollection, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - self.continuation_token = kwargs.get('continuation_token', None) - - -class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceDeActivateRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceDeactivate' # type: str - - -class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceDeleteRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceDelete' # type: str - - -class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): - """MediaGraphInstanceGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. 
method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceGetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceGet' # type: str - - -class MediaGraphInstanceListRequest(OperationBase): - """MediaGraphInstanceListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceListRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceList' # type: str - - -class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. - - :param description: An optional description for the instance. - :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. - :type topology_name: str - :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. Possible values include: "Inactive", - "Activating", "Active", "Deactivating". 
- :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'topology_name': {'key': 'topologyName', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, - 'state': {'key': 'state', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceProperties, self).__init__(**kwargs) - self.description = kwargs.get('description', None) - self.topology_name = kwargs.get('topology_name', None) - self.parameters = kwargs.get('parameters', None) - self.state = kwargs.get('state', None) - - -class MediaGraphInstanceSetRequest(OperationBase): - """MediaGraphInstanceSetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. - :type instance: ~azure.media.lva.edge.models.MediaGraphInstance - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'instance': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceSet' # type: str - self.instance = kwargs['instance'] - - -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): - """MediaGraphInstanceSetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. 
- - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceSetRequestBody, self).__init__(**kwargs) - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. 
An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish messages. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. - :type hub_output_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = kwargs.get('hub_output_name', None) - - -class MediaGraphSource(msrest.serialization.Model): - """Media graph source. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node.
- :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - - -class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param hub_input_name: Name of the input path where messages can be routed to (via routes - declared in the IoT Edge deployment manifest). - :type hub_input_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphIoTHubMessageSource, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str - self.hub_input_name = kwargs.get('hub_input_name', None) - - -class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): - """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. 
- - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param sensitivity: Enumeration that specifies the sensitivity of the motion detection - processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the - regions, within the video frame, where motion was detected. Default is true. - :type output_motion_region: bool - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, - 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphMotionDetectionProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str - self.sensitivity = kwargs.get('sensitivity', None) - self.output_motion_region = kwargs.get('output_motion_region', None) - - -class MediaGraphNodeInput(msrest.serialization.Model): - """Represents the input to any node in a media graph. - - :param node_name: The name of another node in the media graph, the output of which is used as - input to this node. 
- :type node_name: str - :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] - """ - - _attribute_map = { - 'node_name': {'key': 'nodeName', 'type': 'str'}, - 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphNodeInput, self).__init__(**kwargs) - self.node_name = kwargs.get('node_name', None) - self.output_selectors = kwargs.get('output_selectors', None) - - -class MediaGraphOutputSelector(msrest.serialization.Model): - """Allows for the selection of particular streams from another node. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar property: The stream property to compare with. Default value: "mediaType". - :vartype property: str - :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator - :param value: Value to compare against. - :type value: str - """ - - _validation = { - 'property': {'constant': True}, - } - - _attribute_map = { - 'property': {'key': 'property', 'type': 'str'}, - 'operator': {'key': 'operator', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - property = "mediaType" - - def __init__( - self, - **kwargs - ): - super(MediaGraphOutputSelector, self).__init__(**kwargs) - self.operator = kwargs.get('operator', None) - self.value = kwargs.get('value', None) - - -class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the parameter. - :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. - :type default: str - """ - - _validation = { - 'name': {'required': True, 'max_length': 64, 'min_length': 0}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'default': {'key': 'default', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphParameterDeclaration, self).__init__(**kwargs) - self.name = kwargs['name'] - self.type = kwargs['type'] - self.description = kwargs.get('description', None) - self.default = kwargs.get('default', None) - - -class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. Name of parameter as defined in the graph topology. - :type name: str - :param value: Required. Value of parameter. 
- :type value: str - """ - - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphParameterDefinition, self).__init__(**kwargs) - self.name = kwargs['name'] - self.value = kwargs['value'] - - -class MediaGraphPemCertificateList(MediaGraphCertificateSource): - """A list of PEM formatted certificates. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates one per entry. - :type certificates: list[str] - """ - - _validation = { - 'type': {'required': True}, - 'certificates': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'certificates': {'key': 'certificates', 'type': '[str]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphPemCertificateList, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str - self.certificates = kwargs['certificates'] - - -class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport - :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'endpoint': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'transport': {'key': 'transport', 'type': 'str'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphRtspSource, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str - self.transport = kwargs.get('transport', None) - self.endpoint = kwargs['endpoint'] - - -class MediaGraphSignalGateProcessor(MediaGraphProcessor): - """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. - :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. 
- :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). - :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. - :type maximum_activation_time: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, - 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, - 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, - 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str - self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) - self.activation_signal_offset = kwargs.get('activation_signal_offset', None) - self.minimum_activation_time = kwargs.get('minimum_activation_time', None) - self.maximum_activation_time = kwargs.get('maximum_activation_time', None) - - -class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. - - :param created_at: The timestamp of resource creation (UTC). - :type created_at: ~datetime.datetime - :param last_modified_at: The timestamp of resource last modification (UTC). 
- :type last_modified_at: ~datetime.datetime - """ - - _attribute_map = { - 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, - 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSystemData, self).__init__(**kwargs) - self.created_at = kwargs.get('created_at', None) - self.last_modified_at = kwargs.get('last_modified_at', None) - - -class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null - designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource - :param validation_options: Validation options to use when authenticating a TLS connection. By - default, strict validation is used. 
- :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, - 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTlsEndpoint, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str - self.trusted_certificates = kwargs.get('trusted_certificates', None) - self.validation_options = kwargs.get('validation_options', None) - - -class MediaGraphTlsValidationOptions(msrest.serialization.Model): - """Options for controlling the authentication of TLS endpoints. - - :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. - :type ignore_hostname: str - :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the - current time. - :type ignore_signature: str - """ - - _attribute_map = { - 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, - 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) - self.ignore_hostname = kwargs.get('ignore_hostname', None) - self.ignore_signature = kwargs.get('ignore_signature', None) - - -class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. 
- :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopology, self).__init__(**kwargs) - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. - - :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). - :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyCollection, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - self.continuation_token = kwargs.get('continuation_token', None) - - -class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): - """MediaGraphTopologyDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyDeleteRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyDelete' # type: str - - -class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): - """MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyGetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyGet' # type: str - - -class MediaGraphTopologyListRequest(OperationBase): - """MediaGraphTopologyListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyListRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyList' # type: str - - -class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. - - :param description: An optional description for the instance. - :type description: str - :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] - :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, - 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, - 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, - 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyProperties, self).__init__(**kwargs) - self.description = kwargs.get('description', None) - self.parameters = kwargs.get('parameters', None) - self.sources = kwargs.get('sources', None) - self.processors = kwargs.get('processors', None) - self.sinks = kwargs.get('sinks', None) - - -class MediaGraphTopologySetRequest(OperationBase): - """MediaGraphTopologySetRequest. 
- - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param graph: Required. Describes a graph topology. - :type graph: ~azure.media.lva.edge.models.MediaGraphTopology - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'graph': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologySetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologySet' # type: str - self.graph = kwargs['graph'] - - -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): - """MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologySetRequestBody, self).__init__(**kwargs) - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): - """An endpoint that the media graph can connect to, with no encryption in transit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. 
- :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphUnsecuredEndpoint, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str - - -class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): - """Username/password credential pair. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param username: Required. Username for a username/password pair. - :type username: str - :param password: Password for a username/password pair. - :type password: str - """ - - _validation = { - 'type': {'required': True}, - 'username': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'username': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str - self.username = kwargs['username'] - self.password = kwargs.get('password', None) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py deleted file mode 100644 index 5de3adde8e11..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/models/_models_py3.py +++ /dev/null @@ -1,2185 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) 
Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import List, Optional, Union - -import msrest.serialization - -from ._definitionsfor_live_video_analyticson_io_tedge_enums import * - - -class OperationBase(msrest.serialization.Model): - """OperationBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(OperationBase, self).__init__(**kwargs) - self.method_name = None # type: Optional[str] - - -class ItemNonSetRequestBase(OperationBase): - """ItemNonSetRequestBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(ItemNonSetRequestBase, self).__init__(**kwargs) - self.method_name = 'ItemNonSetRequestBase' # type: str - self.name = name - - -class MediaGraphSink(msrest.serialization.Model): - """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - **kwargs - ): - super(MediaGraphSink, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - self.inputs = inputs - - -class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. - :type asset_name_pattern: str - :param segment_length: When writing media to an asset, wait until at least this duration of - media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum - of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. 
Used when the Edge device is temporarily disconnected from - Azure. - :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. - :type local_media_cache_maximum_size_mi_b: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, - 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, - 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - asset_name_pattern: Optional[str] = None, - segment_length: Optional[datetime.timedelta] = None, - local_media_cache_path: Optional[str] = None, - local_media_cache_maximum_size_mi_b: Optional[str] = None, - **kwargs - ): - super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = asset_name_pattern - self.segment_length = segment_length - self.local_media_cache_path = local_media_cache_path - self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b - - -class MediaGraphCertificateSource(msrest.serialization.Model): - """Base class for certificate sources. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphPemCertificateList. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCertificateSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphProcessor(msrest.serialization.Model): - """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - **kwargs - ): - super(MediaGraphProcessor, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - self.inputs = inputs - - -class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = endpoint - self.image = image - - -class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. 
- :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str - - -class MediaGraphCredentials(msrest.serialization.Model): - """Credentials to present during authentication. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCredentials, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphEndpoint(msrest.serialization.Model): - """Base class for endpoints. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. 
- :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - **kwargs - ): - super(MediaGraphEndpoint, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.credentials = credentials - self.url = url - - -class MediaGraphFileSink(MediaGraphSink): - """Enables a media graph to write/store media (video and audio) to a file on the Edge device. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. 
- :type file_path_pattern: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - file_path_pattern: str, - **kwargs - ): - super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = file_path_pattern - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. 
- :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - maximum_fps: Optional[str] = None, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = maximum_fps - - -class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - :param data_transfer: Required. How media should be transferred to the inferencing engine. 
- :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'data_transfer': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - data_transfer: "MediaGraphGrpcExtensionDataTransfer", - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str - self.data_transfer = data_transfer - - -class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. - - All required parameters must be populated in order to send to Azure. - - :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. - :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible - values include: "Embedded", "SharedMemory". 
- :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__( - self, - *, - mode: Union[str, "MediaGraphGrpcExtensionDataTransferMode"], - shared_memory_size_mi_b: Optional[str] = None, - **kwargs - ): - super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) - self.shared_memory_size_mi_b = shared_memory_size_mi_b - self.mode = mode - - -class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str - - -class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): - """Http header service credentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. 
- :type header_value: str - """ - - _validation = { - 'type': {'required': True}, - 'header_name': {'required': True}, - 'header_value': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'header_name': {'key': 'headerName', 'type': 'str'}, - 'header_value': {'key': 'headerValue', 'type': 'str'}, - } - - def __init__( - self, - *, - header_name: str, - header_value: str, - **kwargs - ): - super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str - self.header_name = header_name - self.header_value = header_value - - -class MediaGraphImage(msrest.serialization.Model): - """Describes the properties of an image frame. - - :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale - :param format: Encoding settings for an image. - :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat - """ - - _attribute_map = { - 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, - 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, - } - - def __init__( - self, - *, - scale: Optional["MediaGraphImageScale"] = None, - format: Optional["MediaGraphImageFormat"] = None, - **kwargs - ): - super(MediaGraphImage, self).__init__(**kwargs) - self.scale = scale - self.format = format - - -class MediaGraphImageFormat(msrest.serialization.Model): - """Encoding settings for an image. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormat, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). - :type quality: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'str'}, - } - - def __init__( - self, - *, - encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", - quality: Optional[str] = None, - **kwargs - ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = encoding - self.quality = quality - - -class MediaGraphImageFormatRaw(MediaGraphImageFormat): - """Encoding settings for raw images. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, - } - - def __init__( - self, - *, - pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, - **kwargs - ): - super(MediaGraphImageFormatRaw, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = pixel_format - - -class MediaGraphImageScale(msrest.serialization.Model): - """The scaling mode for the image. - - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode - :param width: The desired output width of the image. - :type width: str - :param height: The desired output height of the image. - :type height: str - """ - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__( - self, - *, - mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, - width: Optional[str] = None, - height: Optional[str] = None, - **kwargs - ): - super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = mode - self.width = width - self.height = height - - -class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. 
- :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphInstanceProperties"] = None, - **kwargs - ): - super(MediaGraphInstance, self).__init__(**kwargs) - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceActivateRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceActivate' # type: str - - -class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. - - :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). - :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - *, - value: Optional[List["MediaGraphInstance"]] = None, - continuation_token: Optional[str] = None, - **kwargs - ): - super(MediaGraphInstanceCollection, self).__init__(**kwargs) - self.value = value - self.continuation_token = continuation_token - - -class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceDeActivateRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceDeactivate' # type: str - - -class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceDeleteRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceDelete' # type: str - - -class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): - """MediaGraphInstanceGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". 
- :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceGetRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceGet' # type: str - - -class MediaGraphInstanceListRequest(OperationBase): - """MediaGraphInstanceListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceListRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceList' # type: str - - -class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. - - :param description: An optional description for the instance. - :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. - :type topology_name: str - :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. 
Possible values include: "Inactive", - "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'topology_name': {'key': 'topologyName', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, - 'state': {'key': 'state', 'type': 'str'}, - } - - def __init__( - self, - *, - description: Optional[str] = None, - topology_name: Optional[str] = None, - parameters: Optional[List["MediaGraphParameterDefinition"]] = None, - state: Optional[Union[str, "MediaGraphInstanceState"]] = None, - **kwargs - ): - super(MediaGraphInstanceProperties, self).__init__(**kwargs) - self.description = description - self.topology_name = topology_name - self.parameters = parameters - self.state = state - - -class MediaGraphInstanceSetRequest(OperationBase): - """MediaGraphInstanceSetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. 
- :type instance: ~azure.media.lva.edge.models.MediaGraphInstance - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'instance': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - instance: "MediaGraphInstance", - **kwargs - ): - super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceSet' # type: str - self.instance = instance - - -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): - """MediaGraphInstanceSetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphInstanceProperties"] = None, - **kwargs - ): - super(MediaGraphInstanceSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. 
- :type hub_output_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - hub_output_name: Optional[str] = None, - **kwargs - ): - super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = hub_output_name - - -class MediaGraphSource(msrest.serialization.Model): - """Media graph source. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - - -class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. 
- - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param hub_input_name: Name of the input path where messages can be routed to (via routes - declared in the IoT Edge deployment manifest). - :type hub_input_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - hub_input_name: Optional[str] = None, - **kwargs - ): - super(MediaGraphIoTHubMessageSource, self).__init__(name=name, **kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str - self.hub_input_name = hub_input_name - - -class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): - """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param sensitivity: Enumeration that specifies the sensitivity of the motion detection - processor. Possible values include: "Low", "Medium", "High". 
- :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the - regions, within the video frame, where motion was detected. Default is true. - :type output_motion_region: bool - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, - 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, - output_motion_region: Optional[bool] = None, - **kwargs - ): - super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str - self.sensitivity = sensitivity - self.output_motion_region = output_motion_region - - -class MediaGraphNodeInput(msrest.serialization.Model): - """Represents the input to any node in a media graph. - - :param node_name: The name of another node in the media graph, the output of which is used as - input to this node. - :type node_name: str - :param output_selectors: Allows for the selection of particular streams from another node. 
- :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] - """ - - _attribute_map = { - 'node_name': {'key': 'nodeName', 'type': 'str'}, - 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, - } - - def __init__( - self, - *, - node_name: Optional[str] = None, - output_selectors: Optional[List["MediaGraphOutputSelector"]] = None, - **kwargs - ): - super(MediaGraphNodeInput, self).__init__(**kwargs) - self.node_name = node_name - self.output_selectors = output_selectors - - -class MediaGraphOutputSelector(msrest.serialization.Model): - """Allows for the selection of particular streams from another node. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar property: The stream property to compare with. Default value: "mediaType". - :vartype property: str - :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator - :param value: Value to compare against. - :type value: str - """ - - _validation = { - 'property': {'constant': True}, - } - - _attribute_map = { - 'property': {'key': 'property', 'type': 'str'}, - 'operator': {'key': 'operator', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - property = "mediaType" - - def __init__( - self, - *, - operator: Optional[Union[str, "MediaGraphOutputSelectorOperator"]] = None, - value: Optional[str] = None, - **kwargs - ): - super(MediaGraphOutputSelector, self).__init__(**kwargs) - self.operator = operator - self.value = value - - -class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. 
This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the parameter. - :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. - :type default: str - """ - - _validation = { - 'name': {'required': True, 'max_length': 64, 'min_length': 0}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'default': {'key': 'default', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "MediaGraphParameterType"], - description: Optional[str] = None, - default: Optional[str] = None, - **kwargs - ): - super(MediaGraphParameterDeclaration, self).__init__(**kwargs) - self.name = name - self.type = type - self.description = description - self.default = default - - -class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. Name of parameter as defined in the graph topology. - :type name: str - :param value: Required. Value of parameter. 
- :type value: str - """ - - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - value: str, - **kwargs - ): - super(MediaGraphParameterDefinition, self).__init__(**kwargs) - self.name = name - self.value = value - - -class MediaGraphPemCertificateList(MediaGraphCertificateSource): - """A list of PEM formatted certificates. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates one per entry. - :type certificates: list[str] - """ - - _validation = { - 'type': {'required': True}, - 'certificates': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'certificates': {'key': 'certificates', 'type': '[str]'}, - } - - def __init__( - self, - *, - certificates: List[str], - **kwargs - ): - super(MediaGraphPemCertificateList, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str - self.certificates = certificates - - -class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport - :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'endpoint': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'transport': {'key': 'transport', 'type': 'str'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - } - - def __init__( - self, - *, - name: str, - endpoint: "MediaGraphEndpoint", - transport: Optional[Union[str, "MediaGraphRtspTransport"]] = None, - **kwargs - ): - super(MediaGraphRtspSource, self).__init__(name=name, **kwargs) - self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str - self.transport = transport - self.endpoint = endpoint - - -class MediaGraphSignalGateProcessor(MediaGraphProcessor): - """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. - :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. 
- :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). - :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. - :type maximum_activation_time: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, - 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, - 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, - 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - activation_evaluation_window: Optional[str] = None, - activation_signal_offset: Optional[str] = None, - minimum_activation_time: Optional[str] = None, - maximum_activation_time: Optional[str] = None, - **kwargs - ): - super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str - self.activation_evaluation_window = activation_evaluation_window - self.activation_signal_offset = activation_signal_offset - self.minimum_activation_time = minimum_activation_time - self.maximum_activation_time = maximum_activation_time - - -class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. - - :param created_at: The timestamp of resource creation (UTC). - :type created_at: ~datetime.datetime - :param last_modified_at: The timestamp of resource last modification (UTC). 
- :type last_modified_at: ~datetime.datetime - """ - - _attribute_map = { - 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, - 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, - } - - def __init__( - self, - *, - created_at: Optional[datetime.datetime] = None, - last_modified_at: Optional[datetime.datetime] = None, - **kwargs - ): - super(MediaGraphSystemData, self).__init__(**kwargs) - self.created_at = created_at - self.last_modified_at = last_modified_at - - -class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null - designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource - :param validation_options: Validation options to use when authenticating a TLS connection. By - default, strict validation is used. 
- :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, - 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - trusted_certificates: Optional["MediaGraphCertificateSource"] = None, - validation_options: Optional["MediaGraphTlsValidationOptions"] = None, - **kwargs - ): - super(MediaGraphTlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) - self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str - self.trusted_certificates = trusted_certificates - self.validation_options = validation_options - - -class MediaGraphTlsValidationOptions(msrest.serialization.Model): - """Options for controlling the authentication of TLS endpoints. - - :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. - :type ignore_hostname: str - :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the - current time. 
- :type ignore_signature: str - """ - - _attribute_map = { - 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, - 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, - } - - def __init__( - self, - *, - ignore_hostname: Optional[str] = None, - ignore_signature: Optional[str] = None, - **kwargs - ): - super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) - self.ignore_hostname = ignore_hostname - self.ignore_signature = ignore_signature - - -class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphTopologyProperties"] = None, - **kwargs - ): - super(MediaGraphTopology, self).__init__(**kwargs) - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. - - :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). 
- :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - *, - value: Optional[List["MediaGraphTopology"]] = None, - continuation_token: Optional[str] = None, - **kwargs - ): - super(MediaGraphTopologyCollection, self).__init__(**kwargs) - self.value = value - self.continuation_token = continuation_token - - -class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): - """MediaGraphTopologyDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphTopologyDeleteRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphTopologyDelete' # type: str - - -class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): - """MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphTopologyGetRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphTopologyGet' # type: str - - -class MediaGraphTopologyListRequest(OperationBase): - """MediaGraphTopologyListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyListRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyList' # type: str - - -class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. - - :param description: An optional description for the instance. - :type description: str - :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] - :param sinks: name. 
- :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, - 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, - 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, - 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, - } - - def __init__( - self, - *, - description: Optional[str] = None, - parameters: Optional[List["MediaGraphParameterDeclaration"]] = None, - sources: Optional[List["MediaGraphSource"]] = None, - processors: Optional[List["MediaGraphProcessor"]] = None, - sinks: Optional[List["MediaGraphSink"]] = None, - **kwargs - ): - super(MediaGraphTopologyProperties, self).__init__(**kwargs) - self.description = description - self.parameters = parameters - self.sources = sources - self.processors = processors - self.sinks = sinks - - -class MediaGraphTopologySetRequest(OperationBase): - """MediaGraphTopologySetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param graph: Required. Describes a graph topology. 
- :type graph: ~azure.media.lva.edge.models.MediaGraphTopology - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'graph': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - graph: "MediaGraphTopology", - **kwargs - ): - super(MediaGraphTopologySetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologySet' # type: str - self.graph = graph - - -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): - """MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphTopologyProperties"] = None, - **kwargs - ): - super(MediaGraphTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): - """An endpoint that the media graph can connect to, with no encryption in transit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. 
- :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - **kwargs - ): - super(MediaGraphUnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) - self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str - - -class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): - """Username/password credential pair. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param username: Required. Username for a username/password pair. - :type username: str - :param password: Password for a username/password pair. 
- :type password: str - """ - - _validation = { - 'type': {'required': True}, - 'username': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'username': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__( - self, - *, - username: str, - password: Optional[str] = None, - **kwargs - ): - super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str - self.username = username - self.password = password diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed deleted file mode 100644 index e5aff4f83af8..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/lva/edge/_generated/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index d318650fa662..8b602ac52507 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -11,8 +11,8 @@ autorest --v3 --python ```yaml require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md -output-folder: ../azure/media/lva/edge/_generated -namespace: azure.media.lva.edge +output-folder: ../azure/media/livevideoanalytics/edge/_generated +namespace: azure.media.livevideoanalytics.edge no-namespace-folders: true license-header: MICROSOFT_MIT_NO_VERSION enable-xml: false From c8f67d41ad908c623413a61b083d0dcd27a1516c Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 09:32:15 -0800 Subject: [PATCH 48/64] missed more namespace changes --- .../azure/media/livevideoanalytics/edge/__init__.py | 2 
+- .../azure-media-livevideoanalytics-edge/samples/sample_lva.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py index 2a9c3cc68e52..17fe4565d648 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py @@ -1,5 +1,5 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -from azure.media.lva.edge._generated.models import (MediaGraphTopologySetRequestBody, +from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) def _OverrideTopologySetRequestSerialize(self): diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py index 46a5d64d3c39..c89397b9c30a 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py @@ -1,7 +1,7 @@ import json import os -from azure.media.lva.edge._generated.models import * +from azure.media.livevideoanalytics.edge._generated.models import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time From 1182a3c487b0aefaa1f4cd50c1a7af05301944c5 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 3 Dec 2020 10:11:19 -0800 Subject: [PATCH 49/64] changes based off PR comments --- .../CHANGELOG.md | 4 +- .../README.md | 4 +- .../media/livevideoanalytics/edge/_version.py | 2 +- .../dev_requirements.txt | 2 +- .../samples/sample_conditional_async.py | 48 
------ .../samples/sample_hello_world.py | 35 ---- .../setup.py | 7 +- .../swagger/autorest.md | 2 +- .../swagger/commandOutput.txt | 158 ------------------ 9 files changed, 9 insertions(+), 253 deletions(-) delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py delete mode 100644 sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt diff --git a/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md b/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md index 816f21db092e..ab5c55e39865 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md @@ -3,6 +3,6 @@ ------------------- -## 0.0.1 (Unreleased) +## 1.0.0b1 (Unreleased) -- Training day! +Initial release diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index 5e665397682c..4ec628ab28e9 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -7,7 +7,7 @@ Use the client library for Live Video Analytics on IoT Edge to: - Simplify interactions with the [Microsoft Azure IoT SDKs](https://github.com/azure/azure-iot-sdks) - Programatically construct media graph topologies and instances -[Package (PyPi)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] +[Package (PyPI)][package] | [Product documentation][doc_product] | [Direct methods][doc_direct_methods] | [Media graphs][doc_media_graph] | [Source code][source] | [Samples][samples] ## Getting started @@ -16,7 +16,7 @@ Use the client library for Live Video Analytics on IoT Edge to: Install the Live Video Analytics client library for Python with pip: ```bash 
-pip install azure-media-livevideoanalytics--edge +pip install azure-media-livevideoanalytics-edge ``` ### Prerequisites diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py index f95f18986f48..6a6e5effdb40 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py @@ -4,4 +4,4 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------- -VERSION = '0.0.1' +VERSION = '1.0.0b1' diff --git a/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt b/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt index c3cf063e6b31..97e51db43ae3 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt +++ b/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt @@ -1,6 +1,6 @@ -../../core/azure-core -e ../../../tools/azure-devtools -e ../../../tools/azure-sdk-tools +../../core/azure-core -e ../../identity/azure-identity aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py deleted file mode 100644 index c894b9b71a09..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_conditional_async.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding: utf-8 - -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import asyncio -import os -from colorama import init, Style, Fore -init() - -from azure.identity.aio import DefaultAzureCredential -from azure.learnappconfig.aio import AppConfigurationClient -from azure.core.exceptions import ResourceNotFoundError, ResourceNotModifiedError -from azure.core import MatchConditions - - -async def main(): - url = os.environ.get('API-LEARN_ENDPOINT') - credential = DefaultAzureCredential() - async with AppConfigurationClient(account_url=url, credential=credential) as client: - - # Retrieve initial color value - try: - first_color = await client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) - except ResourceNotFoundError: - raise - - # Get latest color value, only if it has changed - try: - new_color = await client.get_configuration_setting( - key=os.environ['API-LEARN_SETTING_COLOR_KEY'], - match_condition=MatchConditions.IfModified, - etag=first_color.etag - ) - except ResourceNotModifiedError: - new_color = first_color - - color = getattr(Fore, new_color.value.upper()) - greeting = 'Hello!' - print(f'{color}{greeting}{Style.RESET_ALL}') - - -if __name__ == "__main__": - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py deleted file mode 100644 index f6fa6e0686fd..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_hello_world.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 - -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import os -from colorama import init, Style, Fore -init() - -from azure.identity import DefaultAzureCredential -from azure.learnappconfig import AppConfigurationClient - -def main(): - url = os.environ.get('API-LEARN_ENDPOINT') - credential = DefaultAzureCredential() - client = AppConfigurationClient(account_url=url, credential=credential) - - try: - color_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) - color = color_setting.value.upper() - text_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_TEXT_KEY']) - greeting = text_setting.value - except: - color = 'RED' - greeting = 'Default greeting' - - color = getattr(Fore, color) - print(f'{color}{greeting}{Style.RESET_ALL}') - - -if __name__ == "__main__": - main() diff --git a/sdk/media/azure-media-livevideoanalytics-edge/setup.py b/sdk/media/azure-media-livevideoanalytics-edge/setup.py index 324e31db3312..e1f1f3a85b11 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/setup.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/setup.py @@ -73,7 +73,7 @@ author_email='azpysdkhelp@microsoft.com', url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration', classifiers=[ - "Development Status :: 5 - Production/Stable", + "Development Status :: 4 - Beta", 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', @@ -82,6 +82,7 @@ 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'License :: OSI Approved :: MIT License', ], zip_safe=False, @@ -94,9 +95,5 @@ ":python_version<'3.0'": ['azure-nspkg'], ":python_version<'3.4'": ['enum34>=1.0.4'], ":python_version<'3.5'": ['typing'], - "async:python_version>='3.5'": [ - 'aiohttp>=3.0', - 'aiodns>=2.0' - ], } ) 
\ No newline at end of file diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index 8b602ac52507..03aa0ca72f85 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -1,4 +1,4 @@ -# Azure Queue Storage for Python +# Generate SDK using Autorest see `https://aka.ms/autorest` diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt b/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt deleted file mode 100644 index 0290e6671f32..000000000000 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/commandOutput.txt +++ /dev/null @@ -1,158 +0,0 @@ -AutoRest code generation utility [cli version: 3.0.6247; node: v12.16.1, max-memory: 2048 gb] -(C) 2018 Microsoft Corporation. -https://aka.ms/autorest -NOTE: AutoRest core version selected from configuration: 3.0.6302. - Loading AutoRest core 'C:\Users\hivyas\.autorest\@autorest_core@3.0.6302\node_modules\@autorest\core\dist' (3.0.6302) - Loading AutoRest extension '@autorest/python' (5.1.0-preview.7->5.1.0-preview.7) - Loading AutoRest extension '@autorest/modelerfour' (4.15.400->4.15.400) - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyListRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceListRequest' is using an 'allOf' instead of a $ref. 
This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphUnsecuredEndpoint' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphCognitiveServicesVisionExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphHttpExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphInstanceCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTopologyCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphRtspSource' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUsernamePasswordCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpHeaderCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUnsecuredEndpoint' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTlsEndpoint' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphPemCertificateList' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphOutputSelector' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFileSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphAssetSink' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphMotionDetectionProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphExtensionProcessorBase' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphCognitiveServicesVisionExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtensionDataTransfer' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpExtension' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatRaw' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatEncoded' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphSignalGateProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFrameRateFilterProcessor' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphRtspSource' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSource' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphUsernamePasswordCredentials' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphHttpHeaderCredentials' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphTlsEndpoint' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphPemCertificateList' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFileSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphAssetSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphMotionDetectionProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphExtensionProcessorBase' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphGrpcExtension' has an allOf list with an empty object schema as a parent, removing it. 
- -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatRaw' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatEncoded' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphSignalGateProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFrameRateFilterProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/CheckDuplicateSchemas): Checking for duplicate schemas, this could take a (long) while. Run with --verbose for more detail. - -WARNING (Modeler/MissingType): The schema 'components·109p5kc·schemas·mediagraphrtspsource·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·109p5kc·schemas·mediagraphrtspsource·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1' has no type or format information whatsoever. 
Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1nh92cj·schemas·mediagraphfilesink·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1nh92cj·schemas·mediagraphfilesink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·z5bgs5·schemas·mediagraphassetsink·allof·1' has no type or format information whatsoever. 
Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·z5bgs5·schemas·mediagraphassetsink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1k6pka5·schemas·mediagraphimageformatraw·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1k6pka5·schemas·mediagraphimageformatraw·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1' has no type or format information whatsoever. 
Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1 -Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? 
\ No newline at end of file From 0a826b36e7136af6f928aa2b64b7c6c1cfb11d8a Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 4 Dec 2020 08:59:04 -0800 Subject: [PATCH 50/64] sample changes and removing hardcoded strings --- .../edge/_generated/models/__init__.py | 28 +- ..._live_video_analyticson_io_tedge_enums.py} | 22 +- .../edge/_generated/models/_models.py | 486 +++++++++------- .../edge/_generated/models/_models_py3.py | 526 +++++++++++------- .../samples/sample_lva.py | 52 +- .../swagger/autorest.md | 2 +- 6 files changed, 656 insertions(+), 460 deletions(-) rename sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/{_definitionsfor_live_video_analyticson_io_tedge_enums.py => _direct_methodsfor_live_video_analyticson_io_tedge_enums.py} (88%) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py index 2e389ab8ef9d..2a0c95b5d2f0 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py @@ -15,14 +15,15 @@ from ._models_py3 import MediaGraphEndpoint from ._models_py3 import MediaGraphExtensionProcessorBase from ._models_py3 import MediaGraphFileSink - from ._models_py3 import MediaGraphFrameRateFilterProcessor from ._models_py3 import MediaGraphGrpcExtension from ._models_py3 import MediaGraphGrpcExtensionDataTransfer from ._models_py3 import MediaGraphHttpExtension from ._models_py3 import MediaGraphHttpHeaderCredentials from ._models_py3 import MediaGraphImage from ._models_py3 import MediaGraphImageFormat - from ._models_py3 import MediaGraphImageFormatEncoded + from ._models_py3 import MediaGraphImageFormatBmp + from ._models_py3 import 
MediaGraphImageFormatJpeg + from ._models_py3 import MediaGraphImageFormatPng from ._models_py3 import MediaGraphImageFormatRaw from ._models_py3 import MediaGraphImageScale from ._models_py3 import MediaGraphInstance @@ -45,6 +46,7 @@ from ._models_py3 import MediaGraphPemCertificateList from ._models_py3 import MediaGraphProcessor from ._models_py3 import MediaGraphRtspSource + from ._models_py3 import MediaGraphSamplingOptions from ._models_py3 import MediaGraphSignalGateProcessor from ._models_py3 import MediaGraphSink from ._models_py3 import MediaGraphSource @@ -61,7 +63,7 @@ from ._models_py3 import MediaGraphTopologySetRequestBody from ._models_py3 import MediaGraphUnsecuredEndpoint from ._models_py3 import MediaGraphUsernamePasswordCredentials - from ._models_py3 import OperationBase + from ._models_py3 import MethodRequest except (SyntaxError, ImportError): from ._models import ItemNonSetRequestBase # type: ignore from ._models import MediaGraphAssetSink # type: ignore @@ -71,14 +73,15 @@ from ._models import MediaGraphEndpoint # type: ignore from ._models import MediaGraphExtensionProcessorBase # type: ignore from ._models import MediaGraphFileSink # type: ignore - from ._models import MediaGraphFrameRateFilterProcessor # type: ignore from ._models import MediaGraphGrpcExtension # type: ignore from ._models import MediaGraphGrpcExtensionDataTransfer # type: ignore from ._models import MediaGraphHttpExtension # type: ignore from ._models import MediaGraphHttpHeaderCredentials # type: ignore from ._models import MediaGraphImage # type: ignore from ._models import MediaGraphImageFormat # type: ignore - from ._models import MediaGraphImageFormatEncoded # type: ignore + from ._models import MediaGraphImageFormatBmp # type: ignore + from ._models import MediaGraphImageFormatJpeg # type: ignore + from ._models import MediaGraphImageFormatPng # type: ignore from ._models import MediaGraphImageFormatRaw # type: ignore from ._models import MediaGraphImageScale # 
type: ignore from ._models import MediaGraphInstance # type: ignore @@ -101,6 +104,7 @@ from ._models import MediaGraphPemCertificateList # type: ignore from ._models import MediaGraphProcessor # type: ignore from ._models import MediaGraphRtspSource # type: ignore + from ._models import MediaGraphSamplingOptions # type: ignore from ._models import MediaGraphSignalGateProcessor # type: ignore from ._models import MediaGraphSink # type: ignore from ._models import MediaGraphSource # type: ignore @@ -117,11 +121,10 @@ from ._models import MediaGraphTopologySetRequestBody # type: ignore from ._models import MediaGraphUnsecuredEndpoint # type: ignore from ._models import MediaGraphUsernamePasswordCredentials # type: ignore - from ._models import OperationBase # type: ignore + from ._models import MethodRequest # type: ignore -from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( +from ._direct_methodsfor_live_video_analyticson_io_tedge_enums import ( MediaGraphGrpcExtensionDataTransferMode, - MediaGraphImageEncodingFormat, MediaGraphImageFormatRawPixelFormat, MediaGraphImageScaleMode, MediaGraphInstanceState, @@ -140,14 +143,15 @@ 'MediaGraphEndpoint', 'MediaGraphExtensionProcessorBase', 'MediaGraphFileSink', - 'MediaGraphFrameRateFilterProcessor', 'MediaGraphGrpcExtension', 'MediaGraphGrpcExtensionDataTransfer', 'MediaGraphHttpExtension', 'MediaGraphHttpHeaderCredentials', 'MediaGraphImage', 'MediaGraphImageFormat', - 'MediaGraphImageFormatEncoded', + 'MediaGraphImageFormatBmp', + 'MediaGraphImageFormatJpeg', + 'MediaGraphImageFormatPng', 'MediaGraphImageFormatRaw', 'MediaGraphImageScale', 'MediaGraphInstance', @@ -170,6 +174,7 @@ 'MediaGraphPemCertificateList', 'MediaGraphProcessor', 'MediaGraphRtspSource', + 'MediaGraphSamplingOptions', 'MediaGraphSignalGateProcessor', 'MediaGraphSink', 'MediaGraphSource', @@ -186,9 +191,8 @@ 'MediaGraphTopologySetRequestBody', 'MediaGraphUnsecuredEndpoint', 'MediaGraphUsernamePasswordCredentials', - 'OperationBase', 
+ 'MethodRequest', 'MediaGraphGrpcExtensionDataTransferMode', - 'MediaGraphImageEncodingFormat', 'MediaGraphImageFormatRawPixelFormat', 'MediaGraphImageScaleMode', 'MediaGraphInstanceState', diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py similarity index 88% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py index 6e78e4728244..8223cb77e4a2 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py @@ -33,17 +33,7 @@ class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnu EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. -class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The different encoding formats that can be used for the image. - """ - - JPEG = "Jpeg" #: JPEG image format. - BMP = "Bmp" #: BMP image format. - PNG = "Png" #: PNG image format. - class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """pixel format - """ YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). 
RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. @@ -67,13 +57,13 @@ class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enu STRETCH = "Stretch" #: Stretch input frame to match given dimensions. class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Allowed states for a graph Instance. + """Allowed states for a graph instance. """ - INACTIVE = "Inactive" #: Inactive state. - ACTIVATING = "Activating" #: Activating state. - ACTIVE = "Active" #: Active state. - DEACTIVATING = "Deactivating" #: Deactivating state. + INACTIVE = "Inactive" #: The media graph instance is idle and not processing media. + ACTIVATING = "Activating" #: The media graph instance is transitioning into the active state. + ACTIVE = "Active" #: The media graph instance is active and processing media. + DEACTIVATING = "Deactivating" #: The media graph instance is transitioning into the inactive state. class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Enumeration that specifies the sensitivity of the motion detection processor. @@ -91,8 +81,6 @@ class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, IS_NOT = "isNot" #: A media type is not the same type or a subtype. class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """name - """ STRING = "String" #: A string parameter value. SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. 
diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index b0cb8248aec0..f49575de77b6 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -9,8 +9,8 @@ import msrest.serialization -class OperationBase(msrest.serialization.Model): - """OperationBase. +class MethodRequest(msrest.serialization.Model): + """MethodRequest. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. @@ -19,7 +19,7 @@ class OperationBase(msrest.serialization.Model): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -37,17 +37,17 @@ class OperationBase(msrest.serialization.Model): 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, **kwargs ): - super(OperationBase, self).__init__(**kwargs) + super(MethodRequest, self).__init__(**kwargs) self.method_name = None # type: Optional[str] -class ItemNonSetRequestBase(OperationBase): +class ItemNonSetRequestBase(MethodRequest): """ItemNonSetRequestBase. You probably want to use the sub-classes and not this class directly. Known @@ -59,7 +59,7 @@ class ItemNonSetRequestBase(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -81,7 +81,7 @@ class ItemNonSetRequestBase(OperationBase): 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -136,7 +136,7 @@ def __init__( class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. 
+ """Enables a media graph to record media to an Azure Media Services asset for subsequent playback. All required parameters must be populated in order to send to Azure. @@ -147,18 +147,18 @@ class MediaGraphAssetSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. Used when the Edge device is temporarily disconnected from - Azure. + :type segment_length: str + :param local_media_cache_path: Required. Path to a local file system directory for temporary + caching of media before writing to an Asset. Used when the Edge device is temporarily + disconnected from Azure. :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. + :param local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be + used for temporary caching of media. 
:type local_media_cache_maximum_size_mi_b: str """ @@ -166,6 +166,9 @@ class MediaGraphAssetSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'asset_name_pattern': {'required': True}, + 'local_media_cache_path': {'required': True}, + 'local_media_cache_maximum_size_mi_b': {'required': True}, } _attribute_map = { @@ -173,7 +176,7 @@ class MediaGraphAssetSink(MediaGraphSink): 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, } @@ -184,10 +187,10 @@ def __init__( ): super(MediaGraphAssetSink, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = kwargs.get('asset_name_pattern', None) + self.asset_name_pattern = kwargs['asset_name_pattern'] self.segment_length = kwargs.get('segment_length', None) - self.local_media_cache_path = kwargs.get('local_media_cache_path', None) - self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) + self.local_media_cache_path = kwargs['local_media_cache_path'] + self.local_media_cache_maximum_size_mi_b = kwargs['local_media_cache_maximum_size_mi_b'] class MediaGraphCertificateSource(msrest.serialization.Model): @@ -226,7 +229,7 @@ class MediaGraphProcessor(msrest.serialization.Model): """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. You probably want to use the sub-classes and not this class directly. 
Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. All required parameters must be populated in order to send to Azure. @@ -252,7 +255,7 @@ class MediaGraphProcessor(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} } def __init__( @@ -266,7 +269,7 @@ def __init__( class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + """Processor that allows for extensions outside of the Live Video Analytics Edge module to be integrated into the graph. It is the base class for various different kinds of extension processor types. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. @@ -280,16 +283,22 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -298,6 +307,7 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } _subtype_map = { @@ -310,8 +320,9 @@ def __init__( ): super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = kwargs.get('endpoint', None) - self.image = kwargs.get('image', None) + self.endpoint = kwargs['endpoint'] + self.image = kwargs['image'] + self.sampling_options = kwargs.get('sampling_options', None) class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): @@ -326,16 +337,22 @@ class 
MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -344,6 +361,7 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -439,23 +457,33 @@ class MediaGraphFileSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. 
- :type file_path_pattern: str + :param base_directory_path: Required. Absolute directory for all outputs to the Edge device + from this sink. + :type base_directory_path: str + :param file_name_pattern: Required. File name pattern for creating new files on the Edge + device. + :type file_name_pattern: str + :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + files from this sink. + :type maximum_size_mi_b: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, + 'base_directory_path': {'required': True}, + 'file_name_pattern': {'required': True}, + 'maximum_size_mi_b': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + 'base_directory_path': {'key': 'baseDirectoryPath', 'type': 'str'}, + 'file_name_pattern': {'key': 'fileNamePattern', 'type': 'str'}, + 'maximum_size_mi_b': {'key': 'maximumSizeMiB', 'type': 'str'}, } def __init__( @@ -464,46 +492,9 @@ def __init__( ): super(MediaGraphFileSink, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = kwargs['file_path_pattern'] - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. - :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = kwargs.get('maximum_fps', None) + self.base_directory_path = kwargs['base_directory_path'] + self.file_name_pattern = kwargs['file_name_pattern'] + self.maximum_size_mi_b = kwargs['maximum_size_mi_b'] class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): @@ -518,19 +509,27 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. 
+ :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inferencing engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :param extension_configuration: Optional configuration to pass to the gRPC extension. + :type extension_configuration: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, 'data_transfer': {'required': True}, } @@ -540,7 +539,9 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + 'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'}, } def __init__( @@ -550,6 +551,7 @@ def __init__( super(MediaGraphGrpcExtension, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str self.data_transfer = kwargs['data_transfer'] + self.extension_configuration = kwargs.get('extension_configuration', None) class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): @@ -558,7 +560,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. + mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str :param mode: Required. 
How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". @@ -596,16 +598,22 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -614,6 +622,7 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -633,7 +642,8 @@ class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): :type type: str :param header_name: Required. HTTP header name. :type header_name: str - :param header_value: Required. HTTP header value. + :param header_value: Required. HTTP header value. 
Please use a parameter so that the actual + value is not returned on PUT or GET requests. :type header_value: str """ @@ -686,7 +696,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): """Encoding settings for an image. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + sub-classes are: MediaGraphImageFormatBmp, MediaGraphImageFormatJpeg, MediaGraphImageFormatPng, MediaGraphImageFormatRaw. All required parameters must be populated in order to send to Azure. @@ -703,7 +713,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + 'type': {'#Microsoft.Media.MediaGraphImageFormatBmp': 'MediaGraphImageFormatBmp', '#Microsoft.Media.MediaGraphImageFormatJpeg': 'MediaGraphImageFormatJpeg', '#Microsoft.Media.MediaGraphImageFormatPng': 'MediaGraphImageFormatPng', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} } def __init__( @@ -714,19 +724,39 @@ def __init__( self.type = None # type: Optional[str] -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. +class MediaGraphImageFormatBmp(MediaGraphImageFormat): + """Encoding settings for Bmp images. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). 
+ """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatBmp, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatBmp' # type: str + + +class MediaGraphImageFormatJpeg(MediaGraphImageFormat): + """Encoding settings for Jpeg images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param quality: The image quality. Value must be between 0 to 100 (best quality). :type quality: str """ @@ -736,7 +766,6 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, 'quality': {'key': 'quality', 'type': 'str'}, } @@ -744,12 +773,36 @@ def __init__( self, **kwargs ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = kwargs.get('encoding', "Jpeg") + super(MediaGraphImageFormatJpeg, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatJpeg' # type: str self.quality = kwargs.get('quality', None) +class MediaGraphImageFormatPng(MediaGraphImageFormat): + """Encoding settings for Png images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatPng, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatPng' # type: str + + class MediaGraphImageFormatRaw(MediaGraphImageFormat): """Encoding settings for raw images. @@ -757,7 +810,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + :param pixel_format: Required. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat @@ -765,6 +818,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): _validation = { 'type': {'required': True}, + 'pixel_format': {'required': True}, } _attribute_map = { @@ -778,14 +832,17 @@ def __init__( ): super(MediaGraphImageFormatRaw, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = kwargs.get('pixel_format', None) + self.pixel_format = kwargs['pixel_format'] class MediaGraphImageScale(msrest.serialization.Model): """The scaling mode for the image. - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + All required parameters must be populated in order to send to Azure. + + :param mode: Required. Describes the modes for scaling an input video frame into an image, + before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", + "Pad", "Stretch". 
:type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str @@ -793,6 +850,10 @@ class MediaGraphImageScale(msrest.serialization.Model): :type height: str """ + _validation = { + 'mode': {'required': True}, + } + _attribute_map = { 'mode': {'key': 'mode', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, @@ -804,21 +865,22 @@ def __init__( **kwargs ): super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = kwargs.get('mode', None) + self.mode = kwargs['mode'] self.width = kwargs.get('width', None) self.height = kwargs.get('height', None) class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. + """Represents an instance of a media graph. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -851,7 +913,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. 
:type name: str @@ -869,7 +931,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -880,13 +942,13 @@ def __init__( class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. + """A collection of media graph instances. - :param value: Collection of graph instances. + :param value: A collection of media graph instances. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph instance collection. This is used when the collection contains too many results to + return in one response. :type continuation_token: str """ @@ -913,7 +975,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -931,7 +993,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -950,7 +1012,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. 
:type name: str @@ -968,7 +1030,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -987,7 +1049,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1005,7 +1067,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1015,14 +1077,14 @@ def __init__( self.method_name = 'GraphInstanceGet' # type: str -class MediaGraphInstanceListRequest(OperationBase): +class MediaGraphInstanceListRequest(MethodRequest): """MediaGraphInstanceListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ @@ -1036,7 +1098,7 @@ class MediaGraphInstanceListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1047,17 +1109,17 @@ def __init__( class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. + """Properties of a media graph instance. :param description: An optional description for the instance. :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. 
+ :param topology_name: The name of the media graph topology that this instance will run. A + topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. Possible values include: "Inactive", + :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ @@ -1080,7 +1142,7 @@ def __init__( self.state = kwargs.get('state', None) -class MediaGraphInstanceSetRequest(OperationBase): +class MediaGraphInstanceSetRequest(MethodRequest): """MediaGraphInstanceSetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1089,9 +1151,9 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. + :param instance: Required. Represents an instance of a media graph. :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ @@ -1107,7 +1169,7 @@ class MediaGraphInstanceSetRequest(OperationBase): 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1118,7 +1180,7 @@ def __init__( self.instance = kwargs['instance'] -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """MediaGraphInstanceSetRequestBody. 
Variables are only populated by the server, and will be ignored when sending a request. @@ -1127,13 +1189,14 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -1151,7 +1214,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1166,7 +1229,7 @@ def __init__( class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1177,9 +1240,9 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. 
These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. + :param hub_output_name: Required. Name of the output path to which the media graph will publish + message. These messages can then be delivered to desired destinations by declaring routes + referencing the output path in the IoT Edge deployment manifest. :type hub_output_name: str """ @@ -1187,6 +1250,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'hub_output_name': {'required': True}, } _attribute_map = { @@ -1202,11 +1266,11 @@ def __init__( ): super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = kwargs.get('hub_output_name', None) + self.hub_output_name = kwargs['hub_output_name'] class MediaGraphSource(msrest.serialization.Model): - """Media graph source. + """A source node in a media graph. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. @@ -1244,7 +1308,7 @@ def __init__( class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to receive messages via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1297,6 +1361,8 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool + :param event_aggregation_window: Event aggregation window duration, or 0 for no aggregation. 
+ :type event_aggregation_window: str """ _validation = { @@ -1311,6 +1377,7 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + 'event_aggregation_window': {'key': 'eventAggregationWindow', 'type': 'str'}, } def __init__( @@ -1321,6 +1388,7 @@ def __init__( self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str self.sensitivity = kwargs.get('sensitivity', None) self.output_motion_region = kwargs.get('output_motion_region', None) + self.event_aggregation_window = kwargs.get('event_aggregation_window', None) class MediaGraphNodeInput(msrest.serialization.Model): @@ -1384,19 +1452,19 @@ def __init__( class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """The declaration of a parameter in the media graph topology. A media graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the parameter. :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". + :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", + "Bool". 
:type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. + :param default: The default value for the parameter to be used if the media graph instance does + not specify a value. :type default: str """ @@ -1424,11 +1492,11 @@ def __init__( class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """A key-value pair. A media graph topology allows certain values to be parameterized. When an instance is created, the parameters are supplied with arguments specific to that instance. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the graph topology. + :param name: Required. Name of parameter as defined in the media graph topology. :type name: str :param value: Required. Value of parameter. :type value: str @@ -1484,7 +1552,7 @@ def __init__( class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. + """Enables a media graph to capture media from a RTSP server. All required parameters must be populated in order to send to Azure. @@ -1523,6 +1591,30 @@ def __init__( self.endpoint = kwargs['endpoint'] +class MediaGraphSamplingOptions(msrest.serialization.Model): + """Describes the properties of a sample. 
+ + :param skip_samples_without_annotation: If true, limits the samples submitted to the extension + to only samples which have associated inference(s). + :type skip_samples_without_annotation: str + :param maximum_samples_per_second: Maximum rate of samples submitted to the extension. + :type maximum_samples_per_second: str + """ + + _attribute_map = { + 'skip_samples_without_annotation': {'key': 'skipSamplesWithoutAnnotation', 'type': 'str'}, + 'maximum_samples_per_second': {'key': 'maximumSamplesPerSecond', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSamplingOptions, self).__init__(**kwargs) + self.skip_samples_without_annotation = kwargs.get('skip_samples_without_annotation', None) + self.maximum_samples_per_second = kwargs.get('maximum_samples_per_second', None) + + class MediaGraphSignalGateProcessor(MediaGraphProcessor): """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. @@ -1536,17 +1628,17 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. + events before evaluating them. :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. + :param activation_signal_offset: Required. Signal offset once the gate is activated (can be + negative). It is an offset between the time the event is received, and the timestamp of the + first media sample (eg. 
video frame) that is allowed through by the gate. :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). + :param minimum_activation_time: Required. The minimum period for which the gate remains open in + the absence of subsequent triggers (events). :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. + :param maximum_activation_time: Required. The maximum period for which the gate remains open in + the presence of subsequent events. :type maximum_activation_time: str """ @@ -1554,6 +1646,9 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'activation_signal_offset': {'required': True}, + 'minimum_activation_time': {'required': True}, + 'maximum_activation_time': {'required': True}, } _attribute_map = { @@ -1573,13 +1668,13 @@ def __init__( super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) - self.activation_signal_offset = kwargs.get('activation_signal_offset', None) - self.minimum_activation_time = kwargs.get('minimum_activation_time', None) - self.maximum_activation_time = kwargs.get('maximum_activation_time', None) + self.activation_signal_offset = kwargs['activation_signal_offset'] + self.minimum_activation_time = kwargs['minimum_activation_time'] + self.maximum_activation_time = kwargs['maximum_activation_time'] class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. + """The system data for a resource. This is used by both topologies and instances. :param created_at: The timestamp of resource creation (UTC). 
:type created_at: ~datetime.datetime @@ -1602,7 +1697,7 @@ def __init__( class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + """A TLS endpoint for media graph external connections. All required parameters must be populated in order to send to Azure. @@ -1670,15 +1765,16 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. + """A description of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -1703,13 +1799,13 @@ def __init__( class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. + """A collection of media graph topologies. - :param value: Collection of graph topologies. + :param value: A collection of media graph topologies. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph topologies collection. This is used when the collection contains too many results to + return in one response. 
:type continuation_token: str """ @@ -1736,7 +1832,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1754,7 +1850,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1773,7 +1869,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1791,7 +1887,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1801,14 +1897,14 @@ def __init__( self.method_name = 'GraphTopologyGet' # type: str -class MediaGraphTopologyListRequest(OperationBase): +class MediaGraphTopologyListRequest(MethodRequest): """MediaGraphTopologyListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -1822,7 +1918,7 @@ class MediaGraphTopologyListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1833,18 +1929,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. + """A description of the properties of a media graph topology. - :param description: An optional description for the instance. + :param description: :type description: str - :param parameters: An optional description for the instance. + :param parameters: :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. + :param sources: :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. + :param processors: :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: name. + :param sinks: :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -1868,7 +1964,7 @@ def __init__( self.sinks = kwargs.get('sinks', None) -class MediaGraphTopologySetRequest(OperationBase): +class MediaGraphTopologySetRequest(MethodRequest): """MediaGraphTopologySetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1877,9 +1973,9 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. Describes a graph topology. + :param graph: Required. A description of a media graph topology. 
:type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -1895,7 +1991,7 @@ class MediaGraphTopologySetRequest(OperationBase): 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1906,7 +2002,7 @@ def __init__( self.graph = kwargs['graph'] -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): +class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """MediaGraphTopologySetRequestBody. Variables are only populated by the server, and will be ignored when sending a request. @@ -1915,13 +2011,14 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -1939,7 +2036,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1994,7 +2091,8 @@ class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): :type type: str :param username: Required. Username for a username/password pair. :type username: str - :param password: Password for a username/password pair. 
+ :param password: Password for a username/password pair. Please use a parameter so that the + actual value is not returned on PUT or GET requests. :type password: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index a71214b4003f..d96578ee0a08 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -11,11 +11,11 @@ import msrest.serialization -from ._definitionsfor_live_video_analyticson_io_tedge_enums import * +from ._direct_methodsfor_live_video_analyticson_io_tedge_enums import * -class OperationBase(msrest.serialization.Model): - """OperationBase. +class MethodRequest(msrest.serialization.Model): + """MethodRequest. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. @@ -24,7 +24,7 @@ class OperationBase(msrest.serialization.Model): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -42,17 +42,17 @@ class OperationBase(msrest.serialization.Model): 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, **kwargs ): - super(OperationBase, self).__init__(**kwargs) + super(MethodRequest, self).__init__(**kwargs) self.method_name = None # type: Optional[str] -class ItemNonSetRequestBase(OperationBase): +class ItemNonSetRequestBase(MethodRequest): """ItemNonSetRequestBase. You probably want to use the sub-classes and not this class directly. Known @@ -64,7 +64,7 @@ class ItemNonSetRequestBase(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -86,7 +86,7 @@ class ItemNonSetRequestBase(OperationBase): 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -146,7 +146,7 @@ def __init__( class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. 
+ """Enables a media graph to record media to an Azure Media Services asset for subsequent playback. All required parameters must be populated in order to send to Azure. @@ -157,18 +157,18 @@ class MediaGraphAssetSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. Used when the Edge device is temporarily disconnected from - Azure. + :type segment_length: str + :param local_media_cache_path: Required. Path to a local file system directory for temporary + caching of media before writing to an Asset. Used when the Edge device is temporarily + disconnected from Azure. :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. + :param local_media_cache_maximum_size_mi_b: Required. Maximum amount of disk space that can be + used for temporary caching of media. 
:type local_media_cache_maximum_size_mi_b: str """ @@ -176,6 +176,9 @@ class MediaGraphAssetSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'asset_name_pattern': {'required': True}, + 'local_media_cache_path': {'required': True}, + 'local_media_cache_maximum_size_mi_b': {'required': True}, } _attribute_map = { @@ -183,7 +186,7 @@ class MediaGraphAssetSink(MediaGraphSink): 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, } @@ -193,10 +196,10 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - asset_name_pattern: Optional[str] = None, - segment_length: Optional[datetime.timedelta] = None, - local_media_cache_path: Optional[str] = None, - local_media_cache_maximum_size_mi_b: Optional[str] = None, + asset_name_pattern: str, + local_media_cache_path: str, + local_media_cache_maximum_size_mi_b: str, + segment_length: Optional[str] = None, **kwargs ): super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) @@ -243,7 +246,7 @@ class MediaGraphProcessor(msrest.serialization.Model): """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. 
+ sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. All required parameters must be populated in order to send to Azure. @@ -269,7 +272,7 @@ class MediaGraphProcessor(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} } def __init__( @@ -286,7 +289,7 @@ def __init__( class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + """Processor that allows for extensions outside of the Live Video Analytics Edge module to be integrated into the graph. It is the base class for various different kinds of extension processor types. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. @@ -300,16 +303,22 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. 
:type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -318,6 +327,7 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } _subtype_map = { @@ -329,14 +339,16 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", + sampling_options: Optional["MediaGraphSamplingOptions"] = None, **kwargs ): super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str self.endpoint = endpoint self.image = image + self.sampling_options = sampling_options class 
MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): @@ -351,16 +363,22 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. 
+ :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -369,6 +387,7 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -376,11 +395,12 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", + sampling_options: Optional["MediaGraphSamplingOptions"] = None, **kwargs ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str @@ -472,23 +492,33 @@ class MediaGraphFileSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. - :type file_path_pattern: str + :param base_directory_path: Required. Absolute directory for all outputs to the Edge device + from this sink. 
+ :type base_directory_path: str + :param file_name_pattern: Required. File name pattern for creating new files on the Edge + device. + :type file_name_pattern: str + :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing + files from this sink. + :type maximum_size_mi_b: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, + 'base_directory_path': {'required': True}, + 'file_name_pattern': {'required': True}, + 'maximum_size_mi_b': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + 'base_directory_path': {'key': 'baseDirectoryPath', 'type': 'str'}, + 'file_name_pattern': {'key': 'fileNamePattern', 'type': 'str'}, + 'maximum_size_mi_b': {'key': 'maximumSizeMiB', 'type': 'str'}, } def __init__( @@ -496,55 +526,16 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - file_path_pattern: str, + base_directory_path: str, + file_name_pattern: str, + maximum_size_mi_b: str, **kwargs ): super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = file_path_pattern - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. 
An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. - :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - maximum_fps: Optional[str] = None, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = maximum_fps + self.base_directory_path = base_directory_path + self.file_name_pattern = file_name_pattern + self.maximum_size_mi_b = maximum_size_mi_b class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): @@ -559,19 +550,27 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. 
:type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. + :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inferencing engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :param extension_configuration: Optional configuration to pass to the gRPC extension. + :type extension_configuration: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, 'data_transfer': {'required': True}, } @@ -581,7 +580,9 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + 'extension_configuration': {'key': 'extensionConfiguration', 'type': 'str'}, } def __init__( @@ -589,14 +590,17 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", data_transfer: "MediaGraphGrpcExtensionDataTransfer", - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + sampling_options: Optional["MediaGraphSamplingOptions"] = None, + extension_configuration: Optional[str] = None, **kwargs ): - super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, 
sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str self.data_transfer = data_transfer + self.extension_configuration = extension_configuration class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): @@ -605,7 +609,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. + mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible values include: "Embedded", "SharedMemory". @@ -646,16 +650,22 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. + :param endpoint: Required. Endpoint to which this processor should connect. :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. + :param image: Required. Describes the parameters of the image that is sent as input to the + endpoint. :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :param sampling_options: Describes the sampling options to be applied when forwarding samples + to the extension. 
+ :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'endpoint': {'required': True}, + 'image': {'required': True}, } _attribute_map = { @@ -664,6 +674,7 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'sampling_options': {'key': 'samplingOptions', 'type': 'MediaGraphSamplingOptions'}, } def __init__( @@ -671,11 +682,12 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, + endpoint: "MediaGraphEndpoint", + image: "MediaGraphImage", + sampling_options: Optional["MediaGraphSamplingOptions"] = None, **kwargs ): - super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, sampling_options=sampling_options, **kwargs) self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str @@ -688,7 +700,8 @@ class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): :type type: str :param header_name: Required. HTTP header name. :type header_name: str - :param header_value: Required. HTTP header value. + :param header_value: Required. HTTP header value. Please use a parameter so that the actual + value is not returned on PUT or GET requests. :type header_value: str """ @@ -747,7 +760,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): """Encoding settings for an image. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. 
+ sub-classes are: MediaGraphImageFormatBmp, MediaGraphImageFormatJpeg, MediaGraphImageFormatPng, MediaGraphImageFormatRaw. All required parameters must be populated in order to send to Azure. @@ -764,7 +777,7 @@ class MediaGraphImageFormat(msrest.serialization.Model): } _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + 'type': {'#Microsoft.Media.MediaGraphImageFormatBmp': 'MediaGraphImageFormatBmp', '#Microsoft.Media.MediaGraphImageFormatJpeg': 'MediaGraphImageFormatJpeg', '#Microsoft.Media.MediaGraphImageFormatPng': 'MediaGraphImageFormatPng', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} } def __init__( @@ -775,19 +788,39 @@ def __init__( self.type = None # type: Optional[str] -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. +class MediaGraphImageFormatBmp(MediaGraphImageFormat): + """Encoding settings for Bmp images. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatBmp, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatBmp' # type: str + + +class MediaGraphImageFormatJpeg(MediaGraphImageFormat): + """Encoding settings for Jpeg images. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param quality: The image quality. Value must be between 0 to 100 (best quality). :type quality: str """ @@ -797,23 +830,45 @@ class MediaGraphImageFormatEncoded(MediaGraphImageFormat): _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, 'quality': {'key': 'quality', 'type': 'str'}, } def __init__( self, *, - encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", quality: Optional[str] = None, **kwargs ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = encoding + super(MediaGraphImageFormatJpeg, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatJpeg' # type: str self.quality = quality +class MediaGraphImageFormatPng(MediaGraphImageFormat): + """Encoding settings for Png images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatPng, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatPng' # type: str + + class MediaGraphImageFormatRaw(MediaGraphImageFormat): """Encoding settings for raw images. @@ -821,7 +876,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + :param pixel_format: Required. 
Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat @@ -829,6 +884,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): _validation = { 'type': {'required': True}, + 'pixel_format': {'required': True}, } _attribute_map = { @@ -839,7 +895,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): def __init__( self, *, - pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, + pixel_format: Union[str, "MediaGraphImageFormatRawPixelFormat"], **kwargs ): super(MediaGraphImageFormatRaw, self).__init__(**kwargs) @@ -850,8 +906,11 @@ def __init__( class MediaGraphImageScale(msrest.serialization.Model): """The scaling mode for the image. - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + All required parameters must be populated in order to send to Azure. + + :param mode: Required. Describes the modes for scaling an input video frame into an image, + before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", + "Pad", "Stretch". :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. 
:type width: str @@ -859,6 +918,10 @@ class MediaGraphImageScale(msrest.serialization.Model): :type height: str """ + _validation = { + 'mode': {'required': True}, + } + _attribute_map = { 'mode': {'key': 'mode', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, @@ -868,7 +931,7 @@ class MediaGraphImageScale(msrest.serialization.Model): def __init__( self, *, - mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, + mode: Union[str, "MediaGraphImageScaleMode"], width: Optional[str] = None, height: Optional[str] = None, **kwargs @@ -880,15 +943,16 @@ def __init__( class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. + """Represents an instance of a media graph. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -925,7 +989,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -943,7 +1007,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -956,13 +1020,13 @@ def __init__( class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. 
+ """A collection of media graph instances. - :param value: Collection of graph instances. + :param value: A collection of media graph instances. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph instance collection. This is used when the collection contains too many results to + return in one response. :type continuation_token: str """ @@ -992,7 +1056,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1010,7 +1074,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1031,7 +1095,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1049,7 +1113,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1070,7 +1134,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. 
Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1088,7 +1152,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1100,14 +1164,14 @@ def __init__( self.method_name = 'GraphInstanceGet' # type: str -class MediaGraphInstanceListRequest(OperationBase): +class MediaGraphInstanceListRequest(MethodRequest): """MediaGraphInstanceListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ @@ -1121,7 +1185,7 @@ class MediaGraphInstanceListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1132,17 +1196,17 @@ def __init__( class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. + """Properties of a media graph instance. :param description: An optional description for the instance. :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. + :param topology_name: The name of the media graph topology that this instance will run. A + topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. 
Possible values include: "Inactive", + :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState """ @@ -1170,7 +1234,7 @@ def __init__( self.state = state -class MediaGraphInstanceSetRequest(OperationBase): +class MediaGraphInstanceSetRequest(MethodRequest): """MediaGraphInstanceSetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -1179,9 +1243,9 @@ class MediaGraphInstanceSetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. + :param instance: Required. Represents an instance of a media graph. :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance """ @@ -1197,7 +1261,7 @@ class MediaGraphInstanceSetRequest(OperationBase): 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1210,7 +1274,7 @@ def __init__( self.instance = instance -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """MediaGraphInstanceSetRequestBody. Variables are only populated by the server, and will be ignored when sending a request. @@ -1219,13 +1283,14 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. 
:type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. + :param properties: Properties of a media graph instance. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties """ @@ -1243,7 +1308,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1262,7 +1327,7 @@ def __init__( class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1273,9 +1338,9 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. + :param hub_output_name: Required. Name of the output path to which the media graph will publish + message. These messages can then be delivered to desired destinations by declaring routes + referencing the output path in the IoT Edge deployment manifest. 
:type hub_output_name: str """ @@ -1283,6 +1348,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'hub_output_name': {'required': True}, } _attribute_map = { @@ -1297,7 +1363,7 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], - hub_output_name: Optional[str] = None, + hub_output_name: str, **kwargs ): super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) @@ -1306,7 +1372,7 @@ def __init__( class MediaGraphSource(msrest.serialization.Model): - """Media graph source. + """A source node in a media graph. You probably want to use the sub-classes and not this class directly. Known sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. @@ -1346,7 +1412,7 @@ def __init__( class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + """Enables a media graph to receive messages via routes declared in the IoT Edge deployment manifest. All required parameters must be populated in order to send to Azure. @@ -1402,6 +1468,8 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool + :param event_aggregation_window: Event aggregation window duration, or 0 for no aggregation. 
+ :type event_aggregation_window: str """ _validation = { @@ -1416,6 +1484,7 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + 'event_aggregation_window': {'key': 'eventAggregationWindow', 'type': 'str'}, } def __init__( @@ -1425,12 +1494,14 @@ def __init__( inputs: List["MediaGraphNodeInput"], sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, output_motion_region: Optional[bool] = None, + event_aggregation_window: Optional[str] = None, **kwargs ): super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str self.sensitivity = sensitivity self.output_motion_region = output_motion_region + self.event_aggregation_window = event_aggregation_window class MediaGraphNodeInput(msrest.serialization.Model): @@ -1500,19 +1571,19 @@ def __init__( class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """The declaration of a parameter in the media graph topology. A media graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. :param name: Required. The name of the parameter. :type name: str - :param type: Required. name. 
Possible values include: "String", "SecretString", "Int", - "Double", "Bool". + :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", + "Bool". :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. + :param default: The default value for the parameter to be used if the media graph instance does + not specify a value. :type default: str """ @@ -1545,11 +1616,11 @@ def __init__( class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + """A key-value pair. A media graph topology allows certain values to be parameterized. When an instance is created, the parameters are supplied with arguments specific to that instance. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the graph topology. + :param name: Required. Name of parameter as defined in the media graph topology. :type name: str :param value: Required. Value of parameter. :type value: str @@ -1610,7 +1681,7 @@ def __init__( class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. + """Enables a media graph to capture media from a RTSP server. All required parameters must be populated in order to send to Azure. 
@@ -1653,6 +1724,33 @@ def __init__( self.endpoint = endpoint +class MediaGraphSamplingOptions(msrest.serialization.Model): + """Describes the properties of a sample. + + :param skip_samples_without_annotation: If true, limits the samples submitted to the extension + to only samples which have associated inference(s). + :type skip_samples_without_annotation: str + :param maximum_samples_per_second: Maximum rate of samples submitted to the extension. + :type maximum_samples_per_second: str + """ + + _attribute_map = { + 'skip_samples_without_annotation': {'key': 'skipSamplesWithoutAnnotation', 'type': 'str'}, + 'maximum_samples_per_second': {'key': 'maximumSamplesPerSecond', 'type': 'str'}, + } + + def __init__( + self, + *, + skip_samples_without_annotation: Optional[str] = None, + maximum_samples_per_second: Optional[str] = None, + **kwargs + ): + super(MediaGraphSamplingOptions, self).__init__(**kwargs) + self.skip_samples_without_annotation = skip_samples_without_annotation + self.maximum_samples_per_second = maximum_samples_per_second + + class MediaGraphSignalGateProcessor(MediaGraphProcessor): """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. @@ -1666,17 +1764,17 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): outputs of which are used as input for this processor node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. + events before evaluating them. :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. 
+ :param activation_signal_offset: Required. Signal offset once the gate is activated (can be + negative). It is an offset between the time the event is received, and the timestamp of the + first media sample (eg. video frame) that is allowed through by the gate. :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). + :param minimum_activation_time: Required. The minimum period for which the gate remains open in + the absence of subsequent triggers (events). :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. + :param maximum_activation_time: Required. The maximum period for which the gate remains open in + the presence of subsequent events. :type maximum_activation_time: str """ @@ -1684,6 +1782,9 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, + 'activation_signal_offset': {'required': True}, + 'minimum_activation_time': {'required': True}, + 'maximum_activation_time': {'required': True}, } _attribute_map = { @@ -1701,10 +1802,10 @@ def __init__( *, name: str, inputs: List["MediaGraphNodeInput"], + activation_signal_offset: str, + minimum_activation_time: str, + maximum_activation_time: str, activation_evaluation_window: Optional[str] = None, - activation_signal_offset: Optional[str] = None, - minimum_activation_time: Optional[str] = None, - maximum_activation_time: Optional[str] = None, **kwargs ): super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) @@ -1716,7 +1817,7 @@ def __init__( class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. + """The system data for a resource. This is used by both topologies and instances. :param created_at: The timestamp of resource creation (UTC). 
:type created_at: ~datetime.datetime @@ -1742,7 +1843,7 @@ def __init__( class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + """A TLS endpoint for media graph external connections. All required parameters must be populated in order to send to Azure. @@ -1818,15 +1919,16 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. + """A description of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -1855,13 +1957,13 @@ def __init__( class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. + """A collection of media graph topologies. - :param value: Collection of graph topologies. + :param value: A collection of media graph topologies. :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). + :param continuation_token: A continuation token to use in subsequent calls to enumerate through + the graph topologies collection. This is used when the collection contains too many results to + return in one response. 
:type continuation_token: str """ @@ -1891,7 +1993,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1909,7 +2011,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1930,7 +2032,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param name: Required. method name. :type name: str @@ -1948,7 +2050,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): 'name': {'key': 'name', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1960,14 +2062,14 @@ def __init__( self.method_name = 'GraphTopologyGet' # type: str -class MediaGraphTopologyListRequest(OperationBase): +class MediaGraphTopologyListRequest(MethodRequest): """MediaGraphTopologyListRequest. Variables are only populated by the server, and will be ignored when sending a request. :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ @@ -1981,7 +2083,7 @@ class MediaGraphTopologyListRequest(OperationBase): 'api_version': {'key': '@apiVersion', 'type': 'str'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -1992,18 +2094,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. + """A description of the properties of a media graph topology. - :param description: An optional description for the instance. + :param description: :type description: str - :param parameters: An optional description for the instance. + :param parameters: :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. + :param sources: :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. + :param processors: :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: name. + :param sinks: :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -2033,7 +2135,7 @@ def __init__( self.sinks = sinks -class MediaGraphTopologySetRequest(OperationBase): +class MediaGraphTopologySetRequest(MethodRequest): """MediaGraphTopologySetRequest. Variables are only populated by the server, and will be ignored when sending a request. @@ -2042,9 +2144,9 @@ class MediaGraphTopologySetRequest(OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. Describes a graph topology. + :param graph: Required. A description of a media graph topology. 
:type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -2060,7 +2162,7 @@ class MediaGraphTopologySetRequest(OperationBase): 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -2073,7 +2175,7 @@ def __init__( self.graph = graph -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): +class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """MediaGraphTopologySetRequestBody. Variables are only populated by the server, and will be ignored when sending a request. @@ -2082,13 +2184,14 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): :ivar method_name: method name.Constant filled by server. :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". + :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. name. + :param name: Required. :type name: str - :param system_data: Graph system data. + :param system_data: The system data for a resource. This is used by both topologies and + instances. :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. + :param properties: A description of the properties of a media graph topology. :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties """ @@ -2106,7 +2209,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, } - api_version = "1.0" + api_version = "2.0" def __init__( self, @@ -2168,7 +2271,8 @@ class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): :type type: str :param username: Required. Username for a username/password pair. :type username: str - :param password: Password for a username/password pair. 
+ :param password: Password for a username/password pair. Please use a parameter so that the + actual value is not returned on PUT or GET requests. :type password: str """ diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py index c89397b9c30a..5d4949c64960 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py @@ -6,12 +6,12 @@ from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time -device_id = "lva-sample-device" -module_d = "lvaEdge" -connection_string = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING") +device_id = "enter-your-device-name" +module_d = "enter-your-module-name" +connection_string = "enter-your-connection-string" graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" -graph_url = '"rtsp://sample-url-from-camera"' +graph_url = "rtsp://sample-url-from-camera" def build_graph_topology(): graph_properties = MediaGraphTopologyProperties() @@ -38,43 +38,45 @@ def build_graph_instance(): return graph_instance -def invoke_method(method): +def invoke_method_helper(method): direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize()) registry_manager = IoTHubRegistryManager(connection_string) - return registry_manager.invoke_device_module_method(device_id, module_d, direct_method) + payload = registry_manager.invoke_device_module_method(device_id, module_d, direct_method).payload + if payload is not None and 'error' in payload: + print(payload['error']) + return None + + return payload def main(): graph_topology = build_graph_topology() graph_instance = build_graph_instance() try: - set_graph = invoke_method(MediaGraphTopologySetRequest(graph=graph_topology)) - set_graph_result = MediaGraphTopology.deserialize(set_graph) - - list_graph = 
invoke_method(MediaGraphTopologyListRequest()) - list_graph_result = MediaGraphTopology.deserialize(list_graph) + set_graph_response = invoke_method_helper(MediaGraphTopologySetRequest(graph=graph_topology)) + + list_graph_response = invoke_method_helper(MediaGraphTopologyListRequest()) + if list_graph_response: + list_graph_result = MediaGraphTopologyCollection.deserialize(list_graph_response) - get_graph = invoke_method(MediaGraphTopologyGetRequest(name=graph_topology_name)) - get_graph_result = MediaGraphTopology.deserialize(get_graph) + get_graph_response = invoke_method_helper(MediaGraphTopologyGetRequest(name=graph_topology_name)) + if get_graph_response: + get_graph_result = MediaGraphTopology.deserialize(get_graph_response) - set_graph_instance = invoke_method(MediaGraphInstanceSetRequest(instance=graph_instance)) - set_graph_instance_result = MediaGraphInstance.deserialize(set_graph_instance) + set_graph_instance_response = invoke_method_helper(MediaGraphInstanceSetRequest(instance=graph_instance)) - activate_graph_instance = invoke_method(MediaGraphInstanceActivateRequest(name=graph_instance_name)) - activate_graph_instance_result = MediaGraphInstance.deserialize(activate_graph_instance) + activate_graph_instance_response = invoke_method_helper(MediaGraphInstanceActivateRequest(name=graph_instance_name)) - get_graph_instance = invoke_method(MediaGraphInstanceGetRequest(name=graph_instance_name)) - get_graph_instance_result = MediaGraphInstance.deserialize(get_graph_instance) + get_graph_instance_response = invoke_method_helper(MediaGraphInstanceGetRequest(name=graph_instance_name)) + if get_graph_instance_response: + get_graph_instance_result = MediaGraphInstance.deserialize(get_graph_instance_response) - deactivate_graph_instance = invoke_method(MediaGraphInstanceDeActivateRequest(name=graph_instance_name)) - deactivate_graph_instance_result = MediaGraphInstance.deserialize(deactivate_graph_instance) + deactivate_graph_instance_response = 
invoke_method_helper(MediaGraphInstanceDeActivateRequest(name=graph_instance_name)) - delete_graph_instance = invoke_method(MediaGraphInstanceDeleteRequest(name=graph_instance_name)) - delete_graph_instance_result = MediaGraphInstance.deserialize(delete_graph_instance) + delete_graph_instance_response = invoke_method_helper(MediaGraphInstanceDeleteRequest(name=graph_instance_name)) - delete_graph = invoke_method(MediaGraphTopologyDeleteRequest(name=graph_topology_name)) - delete_graph_result = MediaGraphTopology.deserialize(delete_graph) + delete_graph_response = invoke_method_helper(MediaGraphTopologyDeleteRequest(name=graph_topology_name)) except Exception as ex: print(ex) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md index 03aa0ca72f85..9d8808d4c738 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md @@ -10,7 +10,7 @@ autorest --v3 --python ## Settings ```yaml -require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md output-folder: ../azure/media/livevideoanalytics/edge/_generated namespace: azure.media.livevideoanalytics.edge no-namespace-folders: true From 0b6ae58ad520d6fd79acaf160b25ebcf23ff5bc6 Mon Sep 17 00:00:00 2001 From: hivyas Date: Mon, 7 Dec 2020 11:34:32 -0800 Subject: [PATCH 51/64] updated generated code --- ...r_live_video_analyticson_io_tedge_enums.py | 6 +- .../edge/_generated/models/_models.py | 59 +++++++++++-------- .../edge/_generated/models/_models_py3.py | 59 +++++++++++-------- 3 files changed, 71 insertions(+), 53 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py 
b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py index 8223cb77e4a2..eb8b24817ee0 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py @@ -27,13 +27,15 @@ def __getattr__(cls, name): class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """How frame data should be transmitted to the inferencing engine. + """How frame data should be transmitted to the inference engine. """ EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The pixel format that will be used to encode images. + """ YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. @@ -81,6 +83,8 @@ class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, IS_NOT = "isNot" #: A media type is not the same type or a subtype. class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of the parameter. + """ STRING = "String" #: A string parameter value. SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. 
diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index f49575de77b6..4b7f9bc7fcf7 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -102,7 +102,7 @@ class MediaGraphSink(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -142,12 +142,14 @@ class MediaGraphAssetSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: Required. A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must + include at least one system variable. See the documentation for available variables and + additional examples. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. 
Expressed in increments of 30 seconds, with a minimum @@ -452,7 +454,7 @@ class MediaGraphFileSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -461,7 +463,8 @@ class MediaGraphFileSink(MediaGraphSink): from this sink. :type base_directory_path: str :param file_name_pattern: Required. File name pattern for creating new files on the Edge - device. + device. The pattern must include at least one system variable. See the documentation for + available variables and additional examples. :type file_name_pattern: str :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing files from this sink. @@ -517,7 +520,7 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions - :param data_transfer: Required. How media should be transferred to the inferencing engine. + :param data_transfer: Required. How media should be transferred to the inference engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. @@ -555,14 +558,14 @@ def __init__( class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. + """Describes how media should be transferred to the inference engine. All required parameters must be populated in order to send to Azure. 
:param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode @@ -810,8 +813,9 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: Required. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :param pixel_format: Required. The pixel format that will be used to encode images. Possible + values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", + "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ @@ -875,7 +879,7 @@ class MediaGraphInstance(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. @@ -1191,7 +1195,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
@@ -1235,7 +1239,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -1458,8 +1462,8 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :param name: Required. The name of the parameter. :type name: str - :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", - "Bool". + :param type: Required. The type of the parameter. Possible values include: "String", + "SecretString", "Int", "Double", "Bool". :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str @@ -1496,9 +1500,10 @@ class MediaGraphParameterDefinition(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the media graph topology. + :param name: Required. The name of the parameter defined in the media graph topology. :type name: str - :param value: Required. Value of parameter. + :param value: Required. The value to supply for the named parameter defined in the media graph + topology. :type value: str """ @@ -1765,11 +1770,11 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """A description of a media graph topology. + """The definition of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. 
This is used by both topologies and instances. @@ -1931,16 +1936,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): """A description of the properties of a media graph topology. - :param description: + :param description: A description of a media graph topology. It is recommended to use this to + describe the expected use of the topology. :type description: str - :param parameters: + :param parameters: The list of parameters defined in the topology. The value for these + parameters are supplied by instances of this topology. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: + :param sources: The list of source nodes in this topology. :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: + :param processors: The list of processor nodes in this topology. :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: + :param sinks: The list of sink nodes in this topology. :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -1975,7 +1982,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. A description of a media graph topology. + :param graph: Required. The definition of a media graph topology. :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -2013,7 +2020,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index d96578ee0a08..9dc0d776b487 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -109,7 +109,7 @@ class MediaGraphSink(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -152,12 +152,14 @@ class MediaGraphAssetSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: Required. A name pattern when creating new assets. + :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must + include at least one system variable. See the documentation for available variables and + additional examples. :type asset_name_pattern: str :param segment_length: When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. 
Expressed in increments of 30 seconds, with a minimum @@ -487,7 +489,7 @@ class MediaGraphFileSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -496,7 +498,8 @@ class MediaGraphFileSink(MediaGraphSink): from this sink. :type base_directory_path: str :param file_name_pattern: Required. File name pattern for creating new files on the Edge - device. + device. The pattern must include at least one system variable. See the documentation for + available variables and additional examples. :type file_name_pattern: str :param maximum_size_mi_b: Required. Maximum amount of disk space that can be used for storing files from this sink. @@ -558,7 +561,7 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions - :param data_transfer: Required. How media should be transferred to the inferencing engine. + :param data_transfer: Required. How media should be transferred to the inference engine. :type data_transfer: ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. @@ -604,14 +607,14 @@ def __init__( class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. + """Describes how media should be transferred to the inference engine. All required parameters must be populated in order to send to Azure. 
:param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specified otherwise. :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode @@ -876,8 +879,9 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param pixel_format: Required. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :param pixel_format: Required. The pixel format that will be used to encode images. Possible + values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", + "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat """ @@ -947,7 +951,7 @@ class MediaGraphInstance(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. @@ -1285,7 +1289,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph instance. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
@@ -1333,7 +1337,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str - :param name: Required. Name to be used for the media graph sink. + :param name: Required. The name to be used for the media graph sink. :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. @@ -1577,8 +1581,8 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :param name: Required. The name of the parameter. :type name: str - :param type: Required. Possible values include: "String", "SecretString", "Int", "Double", - "Bool". + :param type: Required. The type of the parameter. Possible values include: "String", + "SecretString", "Int", "Double", "Bool". :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType :param description: Description of the parameter. :type description: str @@ -1620,9 +1624,10 @@ class MediaGraphParameterDefinition(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. - :param name: Required. Name of parameter as defined in the media graph topology. + :param name: Required. The name of the parameter defined in the media graph topology. :type name: str - :param value: Required. Value of parameter. + :param value: Required. The value to supply for the named parameter defined in the media graph + topology. :type value: str """ @@ -1919,11 +1924,11 @@ def __init__( class MediaGraphTopology(msrest.serialization.Model): - """A description of a media graph topology. + """The definition of a media graph topology. All required parameters must be populated in order to send to Azure. - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. 
This is used by both topologies and instances. @@ -2096,16 +2101,18 @@ def __init__( class MediaGraphTopologyProperties(msrest.serialization.Model): """A description of the properties of a media graph topology. - :param description: + :param description: A description of a media graph topology. It is recommended to use this to + describe the expected use of the topology. :type description: str - :param parameters: + :param parameters: The list of parameters defined in the topology. The value for these + parameters are supplied by instances of this topology. :type parameters: list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] - :param sources: + :param sources: The list of source nodes in this topology. :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] - :param processors: + :param processors: The list of processor nodes in this topology. :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] - :param sinks: + :param sinks: The list of sink nodes in this topology. :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] """ @@ -2146,7 +2153,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param graph: Required. A description of a media graph topology. + :param graph: Required. The definition of a media graph topology. :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology """ @@ -2186,7 +2193,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str - :param name: Required. + :param name: Required. The identifier for the media graph topology. :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. 
From 7993cfb44d4da0b7bd333d475087acb6fdbec18a Mon Sep 17 00:00:00 2001 From: hivyas Date: Mon, 7 Dec 2020 18:59:48 -0800 Subject: [PATCH 52/64] updating swagger and import statement --- .../README.md | 3 + .../media/livevideoanalytics/edge/__init__.py | 65 ++++++++++++++++++- .../samples/sample_lva.py | 3 +- 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index 4ec628ab28e9..9e84ab1cd61e 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -23,6 +23,9 @@ pip install azure-media-livevideoanalytics-edge * Python 2.7, or 3.5 or later is required to use this package. * You need an active [Azure subscription][azure_sub], and a [IoT device connection string][iot_device_connection_string] to use this package. +| SDK | LVA Edge Module | +|---|---| +| 1.0.0b1 | 2.0 | ### Creating a graph topology and making requests Please visit the [Examples](#examples) for starter code ## Key concepts diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py index 17fe4565d648..94a917bccd2b 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py @@ -1,7 +1,68 @@ __path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, -MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) +#from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, +#MediaGraphTopologySetRequest, 
MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) +from ._generated.models import * + +__all__ = [ + "MethodRequest", + "ItemNonSetRequestBase", + "MediaGraphSink" + "MediaGraphAssetSink", + "MediaGraphCertificateSource", + "MediaGraphProcessor", + "MediaGraphExtensionProcessorBase", + "MediaGraphCognitiveServicesVisionExtension", + "MediaGraphCredentials", + "MediaGraphEndpoint", + "MediaGraphFileSink", + "MediaGraphGrpcExtension", + "MediaGraphGrpcExtensionDataTransfer", + "MediaGraphHttpExtension", + "MediaGraphHttpHeaderCredentials", + "MediaGraphImage", + "MediaGraphImageFormat", + "MediaGraphImageFormatBmp", + "MediaGraphImageFormatJpeg", + "MediaGraphImageFormatPng", + "MediaGraphImageFormatRaw", + "MediaGraphImageScale", + "MediaGraphInstance", + "MediaGraphInstanceActivateRequest", + "MediaGraphInstanceCollection", + "MediaGraphInstanceDeActivateRequest", + "MediaGraphInstanceDeleteRequest", + "MediaGraphInstanceGetRequest", + "MediaGraphInstanceListRequest", + "MediaGraphInstanceProperties", + "MediaGraphInstanceSetRequest", + "MediaGraphInstanceSetRequestBody", + "MediaGraphIoTHubMessageSink", + "MediaGraphSource", + "MediaGraphIoTHubMessageSource", + "MediaGraphMotionDetectionProcessor", + "MediaGraphNodeInput", + "MediaGraphOutputSelector", + "MediaGraphParameterDeclaration", + "MediaGraphParameterDefinition", + "MediaGraphPemCertificateList", + "MediaGraphRtspSource", + "MediaGraphSamplingOptions", + "MediaGraphSignalGateProcessor", + "MediaGraphSystemData", + "MediaGraphTlsEndpoint", + "MediaGraphTlsValidationOptions", + "MediaGraphTopology", + "MediaGraphTopologyCollection", + "MediaGraphTopologyDeleteRequest", + "MediaGraphTopologyGetRequest", + "MediaGraphTopologyListRequest", + "MediaGraphTopologyProperties", + "MediaGraphTopologySetRequest", + "MediaGraphTopologySetRequestBody", + "MediaGraphUnsecuredEndpoint", + "MediaGraphUsernamePasswordCredentials" +] def _OverrideTopologySetRequestSerialize(self): graph_body = 
MediaGraphTopologySetRequestBody(name=self.graph.name) graph_body.system_data = self.graph.system_data diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py index 5d4949c64960..f5fa934fb6fe 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py @@ -1,7 +1,8 @@ import json import os -from azure.media.livevideoanalytics.edge._generated.models import * +#from azure.media.livevideoanalytics.edge._generated.models import * +from azure.media.livevideoanalytics.edge import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time From d41e14f9ff61df90297d9f913a335eb8dd14ce2f Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Tue, 8 Dec 2020 09:25:47 -0800 Subject: [PATCH 53/64] Improve import system --- .../media/livevideoanalytics/edge/__init__.py | 66 +------------------ 1 file changed, 3 insertions(+), 63 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py index 94a917bccd2b..7f07d48526e2 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py @@ -1,68 +1,8 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -#from azure.media.livevideoanalytics.edge._generated.models import (MediaGraphTopologySetRequestBody, -#MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody) - from ._generated.models import * +from ._generated import models + +__all__ = models.__all__ -__all__ = [ - "MethodRequest", - 
"ItemNonSetRequestBase", - "MediaGraphSink" - "MediaGraphAssetSink", - "MediaGraphCertificateSource", - "MediaGraphProcessor", - "MediaGraphExtensionProcessorBase", - "MediaGraphCognitiveServicesVisionExtension", - "MediaGraphCredentials", - "MediaGraphEndpoint", - "MediaGraphFileSink", - "MediaGraphGrpcExtension", - "MediaGraphGrpcExtensionDataTransfer", - "MediaGraphHttpExtension", - "MediaGraphHttpHeaderCredentials", - "MediaGraphImage", - "MediaGraphImageFormat", - "MediaGraphImageFormatBmp", - "MediaGraphImageFormatJpeg", - "MediaGraphImageFormatPng", - "MediaGraphImageFormatRaw", - "MediaGraphImageScale", - "MediaGraphInstance", - "MediaGraphInstanceActivateRequest", - "MediaGraphInstanceCollection", - "MediaGraphInstanceDeActivateRequest", - "MediaGraphInstanceDeleteRequest", - "MediaGraphInstanceGetRequest", - "MediaGraphInstanceListRequest", - "MediaGraphInstanceProperties", - "MediaGraphInstanceSetRequest", - "MediaGraphInstanceSetRequestBody", - "MediaGraphIoTHubMessageSink", - "MediaGraphSource", - "MediaGraphIoTHubMessageSource", - "MediaGraphMotionDetectionProcessor", - "MediaGraphNodeInput", - "MediaGraphOutputSelector", - "MediaGraphParameterDeclaration", - "MediaGraphParameterDefinition", - "MediaGraphPemCertificateList", - "MediaGraphRtspSource", - "MediaGraphSamplingOptions", - "MediaGraphSignalGateProcessor", - "MediaGraphSystemData", - "MediaGraphTlsEndpoint", - "MediaGraphTlsValidationOptions", - "MediaGraphTopology", - "MediaGraphTopologyCollection", - "MediaGraphTopologyDeleteRequest", - "MediaGraphTopologyGetRequest", - "MediaGraphTopologyListRequest", - "MediaGraphTopologyProperties", - "MediaGraphTopologySetRequest", - "MediaGraphTopologySetRequestBody", - "MediaGraphUnsecuredEndpoint", - "MediaGraphUsernamePasswordCredentials" -] def _OverrideTopologySetRequestSerialize(self): graph_body = MediaGraphTopologySetRequestBody(name=self.graph.name) graph_body.system_data = self.graph.system_data From bf7e61bb597592dd546bcc75322342372dc556f8 
Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 10 Dec 2020 09:42:59 -0800 Subject: [PATCH 54/64] regenerated using public swagger --- .../README.md | 7 ++- .../edge/_generated/models/_models.py | 62 ++++++++++--------- .../edge/_generated/models/_models_py3.py | 62 ++++++++++--------- 3 files changed, 72 insertions(+), 59 deletions(-) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-livevideoanalytics-edge/README.md index 9e84ab1cd61e..ee06ce1f1c4f 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/README.md +++ b/sdk/media/azure-media-livevideoanalytics-edge/README.md @@ -22,10 +22,11 @@ pip install azure-media-livevideoanalytics-edge * Python 2.7, or 3.5 or later is required to use this package. * You need an active [Azure subscription][azure_sub], and a [IoT device connection string][iot_device_connection_string] to use this package. +* You will need to use the version of the SDK that corresponds to the version of the LVA Edge module you are using. -| SDK | LVA Edge Module | -|---|---| -| 1.0.0b1 | 2.0 | + | SDK | LVA Edge Module | + |---|---| + | 1.0.0b1 | 2.0 | ### Creating a graph topology and making requests Please visit the [Examples](#examples) for starter code ## Key concepts diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py index 4b7f9bc7fcf7..e4139c77881c 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py @@ -17,14 +17,16 @@ class MethodRequest(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. 
+ All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -57,7 +59,7 @@ class ItemNonSetRequestBase(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -66,7 +68,7 @@ class ItemNonSetRequestBase(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -915,7 +917,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -924,7 +926,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -977,7 +979,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. 
Default value: "2.0". :vartype api_version: str @@ -986,7 +988,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1014,7 +1016,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1023,7 +1025,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1051,7 +1053,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1060,7 +1062,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1086,14 +1088,16 @@ class MediaGraphInstanceListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. 
:vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -1153,7 +1157,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1162,7 +1166,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'instance': {'required': True}, } @@ -1191,7 +1195,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1205,7 +1209,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1835,7 +1839,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -1844,7 +1848,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1872,7 +1876,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1881,7 +1885,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1907,14 +1911,16 @@ class MediaGraphTopologyListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -1978,7 +1984,7 @@ class MediaGraphTopologySetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -1987,7 +1993,7 @@ class MediaGraphTopologySetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'graph': {'required': True}, } @@ -2016,7 +2022,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -2030,7 +2036,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py index 9dc0d776b487..054dda46d4a6 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py @@ -22,14 +22,16 @@ class MethodRequest(msrest.serialization.Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -62,7 +64,7 @@ class ItemNonSetRequestBase(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -71,7 +73,7 @@ class ItemNonSetRequestBase(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -991,7 +993,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1000,7 +1002,7 @@ class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1058,7 +1060,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -1067,7 +1069,7 @@ class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1097,7 +1099,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1106,7 +1108,7 @@ class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1136,7 +1138,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1145,7 +1147,7 @@ class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1173,14 +1175,16 @@ class MediaGraphInstanceListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. 
:vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -1245,7 +1249,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1254,7 +1258,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'instance': {'required': True}, } @@ -1285,7 +1289,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -1299,7 +1303,7 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -1996,7 +2000,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -2005,7 +2009,7 @@ class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -2035,7 +2039,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -2044,7 +2048,7 @@ class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } @@ -2072,14 +2076,16 @@ class MediaGraphTopologyListRequest(MethodRequest): Variables are only populated by the server, and will be ignored when sending a request. - :ivar method_name: method name.Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, } @@ -2149,7 +2155,7 @@ class MediaGraphTopologySetRequest(MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". 
:vartype api_version: str @@ -2158,7 +2164,7 @@ class MediaGraphTopologySetRequest(MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'graph': {'required': True}, } @@ -2189,7 +2195,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): All required parameters must be populated in order to send to Azure. - :ivar method_name: method name.Constant filled by server. + :ivar method_name: Required. method name.Constant filled by server. :vartype method_name: str :ivar api_version: api version. Default value: "2.0". :vartype api_version: str @@ -2203,7 +2209,7 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): """ _validation = { - 'method_name': {'readonly': True}, + 'method_name': {'required': True, 'readonly': True}, 'api_version': {'constant': True}, 'name': {'required': True}, } From cfd53fd04937d13839050c91b61c33e35ba87353 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 10 Dec 2020 15:27:21 -0800 Subject: [PATCH 55/64] renaming package and folder --- .../CHANGELOG.md | 0 .../MANIFEST.in | 0 .../README.md | 0 .../azure/__init__.py | 0 .../azure/media/__init__.py | 0 .../azure/media/analytics}/__init__.py | 0 .../azure/media/analytics}/edge/__init__.py | 0 .../azure/media/analytics}/edge/_generated/__init__.py | 0 .../azure/media/analytics}/edge/_generated/_version.py | 0 .../azure/media/analytics}/edge/_generated/models/__init__.py | 0 .../_direct_methodsfor_live_video_analyticson_io_tedge_enums.py | 0 .../azure/media/analytics}/edge/_generated/models/_models.py | 0 .../media/analytics}/edge/_generated/models/_models_py3.py | 0 .../azure/media/analytics}/edge/_generated/py.typed | 0 .../azure/media/analytics}/edge/_version.py | 0 .../dev_requirements.txt | 0 .../docs/DevTips.md | 0 .../samples/sample_lva.py | 2 +- .../sdk_packaging.toml | 0 .../setup.cfg | 0 .../setup.py | 2 +- .../swagger/autorest.md | 
2 +- .../tests/conftest.py | 0 .../tests/test_app_config.py | 0 24 files changed, 3 insertions(+), 3 deletions(-) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/CHANGELOG.md (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/MANIFEST.in (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/README.md (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/azure/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/azure/media/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/_version.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/__init__.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/_models.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => 
azure-media-analyticsedge/azure/media/analytics}/edge/_generated/models/_models_py3.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_generated/py.typed (100%) rename sdk/media/{azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics => azure-media-analyticsedge/azure/media/analytics}/edge/_version.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/dev_requirements.txt (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/docs/DevTips.md (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/samples/sample_lva.py (98%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/sdk_packaging.toml (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/setup.cfg (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/setup.py (98%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/swagger/autorest.md (76%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/tests/conftest.py (100%) rename sdk/media/{azure-media-livevideoanalytics-edge => azure-media-analyticsedge}/tests/test_app_config.py (100%) diff --git a/sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md b/sdk/media/azure-media-analyticsedge/CHANGELOG.md similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/CHANGELOG.md rename to sdk/media/azure-media-analyticsedge/CHANGELOG.md diff --git a/sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in b/sdk/media/azure-media-analyticsedge/MANIFEST.in similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/MANIFEST.in rename to sdk/media/azure-media-analyticsedge/MANIFEST.in diff --git 
a/sdk/media/azure-media-livevideoanalytics-edge/README.md b/sdk/media/azure-media-analyticsedge/README.md similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/README.md rename to sdk/media/azure-media-analyticsedge/README.md diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py b/sdk/media/azure-media-analyticsedge/azure/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/__init__.py rename to 
sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/_version.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/__init__.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py similarity index 100% rename from 
sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/models/_models_py3.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_generated/py.typed rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed diff --git a/sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/azure/media/livevideoanalytics/edge/_version.py rename to sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt b/sdk/media/azure-media-analyticsedge/dev_requirements.txt similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/dev_requirements.txt rename to sdk/media/azure-media-analyticsedge/dev_requirements.txt diff --git a/sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md b/sdk/media/azure-media-analyticsedge/docs/DevTips.md similarity index 100% rename from 
sdk/media/azure-media-livevideoanalytics-edge/docs/DevTips.md rename to sdk/media/azure-media-analyticsedge/docs/DevTips.md diff --git a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py b/sdk/media/azure-media-analyticsedge/samples/sample_lva.py similarity index 98% rename from sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py rename to sdk/media/azure-media-analyticsedge/samples/sample_lva.py index f5fa934fb6fe..139a349bbcac 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-analyticsedge/samples/sample_lva.py @@ -2,7 +2,7 @@ import json import os #from azure.media.livevideoanalytics.edge._generated.models import * -from azure.media.livevideoanalytics.edge import * +from azure.media.analytics.edge import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time diff --git a/sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml b/sdk/media/azure-media-analyticsedge/sdk_packaging.toml similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/sdk_packaging.toml rename to sdk/media/azure-media-analyticsedge/sdk_packaging.toml diff --git a/sdk/media/azure-media-livevideoanalytics-edge/setup.cfg b/sdk/media/azure-media-analyticsedge/setup.cfg similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/setup.cfg rename to sdk/media/azure-media-analyticsedge/setup.cfg diff --git a/sdk/media/azure-media-livevideoanalytics-edge/setup.py b/sdk/media/azure-media-analyticsedge/setup.py similarity index 98% rename from sdk/media/azure-media-livevideoanalytics-edge/setup.py rename to sdk/media/azure-media-analyticsedge/setup.py index e1f1f3a85b11..a333424a5e35 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/setup.py +++ b/sdk/media/azure-media-analyticsedge/setup.py @@ -13,7 +13,7 @@ from setuptools import find_packages, setup 
# Change the PACKAGE_NAME only to change folder and different name -PACKAGE_NAME = "azure-media-livevideoanalytics-edge" +PACKAGE_NAME = "azure-media-analytics-edge" PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" # a-b-c => a/b/c diff --git a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md b/sdk/media/azure-media-analyticsedge/swagger/autorest.md similarity index 76% rename from sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md rename to sdk/media/azure-media-analyticsedge/swagger/autorest.md index 9d8808d4c738..23c7e8518044 100644 --- a/sdk/media/azure-media-livevideoanalytics-edge/swagger/autorest.md +++ b/sdk/media/azure-media-analyticsedge/swagger/autorest.md @@ -10,7 +10,7 @@ autorest --v3 --python ## Settings ```yaml -require: <>Azure\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md +require: https://github.com/Azure/azure-rest-api-specs/blob/7b34c62199a8d84f7252dcb8b08c1b593ae65124/specification/mediaservices/data-plane/readme.md output-folder: ../azure/media/livevideoanalytics/edge/_generated namespace: azure.media.livevideoanalytics.edge no-namespace-folders: true diff --git a/sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py b/sdk/media/azure-media-analyticsedge/tests/conftest.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/tests/conftest.py rename to sdk/media/azure-media-analyticsedge/tests/conftest.py diff --git a/sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py b/sdk/media/azure-media-analyticsedge/tests/test_app_config.py similarity index 100% rename from sdk/media/azure-media-livevideoanalytics-edge/tests/test_app_config.py rename to sdk/media/azure-media-analyticsedge/tests/test_app_config.py From 5a6dade6c53ec97260a802d299c1aee501fe222b Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 10 Dec 2020 15:45:28 -0800 Subject: [PATCH 56/64] updating ci file with new folder name --- sdk/media/ci.yml | 2 ++ 1 
file changed, 2 insertions(+) diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 647f79b8eee8..3a33ab7801fb 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -34,4 +34,6 @@ extends: safeName: azuremedialvaedge - name: azure_media_nspkg safeName: azuremedianspkg + - name: azure_media_analyticsedge + safeName: azuremediaanalyticsedge From b78b798f836c32986455e91113c599a93650f820 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:09:03 -0800 Subject: [PATCH 57/64] updated package name correctly and test --- .../CHANGELOG.md | 0 .../MANIFEST.in | 0 .../README.md | 2 +- .../azure/__init__.py | 0 .../azure/media/__init__.py | 0 .../azure/media/analyticsedge}/__init__.py | 0 .../analyticsedge}/_generated/__init__.py | 0 .../analyticsedge}/_generated/_version.py | 0 .../_generated/models/__init__.py | 0 ...r_live_video_analyticson_io_tedge_enums.py | 0 .../_generated/models/_models.py | 124 ++++++++---------- .../_generated/models/_models_py3.py | 124 ++++++++---------- .../media/analyticsedge}/_generated/py.typed | 0 .../azure/media/analyticsedge}/_version.py | 0 .../dev_requirements.txt | 0 .../docs/DevTips.md | 0 .../samples/sample_lva.py | 2 +- .../sdk_packaging.toml | 0 .../setup.cfg | 0 .../setup.py | 5 +- .../swagger/autorest.md | 4 +- .../tests/conftest.py | 0 .../tests/test_build_graph_serialize.py | 23 ++++ .../azure/media/analytics/__init__.py | 1 - .../tests/test_app_config.py | 5 - 25 files changed, 145 insertions(+), 145 deletions(-) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/CHANGELOG.md (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/MANIFEST.in (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/README.md (99%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/azure/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/azure/media/__init__.py (100%) rename 
sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/_version.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/__init__.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/_models.py (93%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/models/_models_py3.py (93%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_generated/py.typed (100%) rename sdk/media/{azure-media-analyticsedge/azure/media/analytics/edge => azure-media-analytics-edge/azure/media/analyticsedge}/_version.py (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/dev_requirements.txt (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/docs/DevTips.md (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/samples/sample_lva.py (99%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/sdk_packaging.toml (100%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/setup.cfg (100%) rename 
sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/setup.py (96%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/swagger/autorest.md (82%) rename sdk/media/{azure-media-analyticsedge => azure-media-analytics-edge}/tests/conftest.py (100%) create mode 100644 sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py delete mode 100644 sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py delete mode 100644 sdk/media/azure-media-analyticsedge/tests/test_app_config.py diff --git a/sdk/media/azure-media-analyticsedge/CHANGELOG.md b/sdk/media/azure-media-analytics-edge/CHANGELOG.md similarity index 100% rename from sdk/media/azure-media-analyticsedge/CHANGELOG.md rename to sdk/media/azure-media-analytics-edge/CHANGELOG.md diff --git a/sdk/media/azure-media-analyticsedge/MANIFEST.in b/sdk/media/azure-media-analytics-edge/MANIFEST.in similarity index 100% rename from sdk/media/azure-media-analyticsedge/MANIFEST.in rename to sdk/media/azure-media-analytics-edge/MANIFEST.in diff --git a/sdk/media/azure-media-analyticsedge/README.md b/sdk/media/azure-media-analytics-edge/README.md similarity index 99% rename from sdk/media/azure-media-analyticsedge/README.md rename to sdk/media/azure-media-analytics-edge/README.md index ee06ce1f1c4f..288e7704f76b 100644 --- a/sdk/media/azure-media-analyticsedge/README.md +++ b/sdk/media/azure-media-analytics-edge/README.md @@ -16,7 +16,7 @@ Use the client library for Live Video Analytics on IoT Edge to: Install the Live Video Analytics client library for Python with pip: ```bash -pip install azure-media-livevideoanalytics-edge +pip install azure-media-analytics-edge ``` ### Prerequisites diff --git a/sdk/media/azure-media-analyticsedge/azure/__init__.py b/sdk/media/azure-media-analytics-edge/azure/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/__init__.py diff 
--git a/sdk/media/azure-media-analyticsedge/azure/media/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/__init__.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/__init__.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/_version.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/_version.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/_version.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/__init__.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/__init__.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/__init__.py diff --git 
a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_direct_methodsfor_live_video_analyticson_io_tedge_enums.py diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models.py similarity index 93% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models.py index e4139c77881c..d16abafbecc2 100644 --- a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models.py +++ b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models.py @@ -108,7 +108,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -148,7 +148,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. 
- :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must include at least one system variable. See the documentation for available variables and additional examples. @@ -243,7 +243,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -286,15 +286,15 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. 
- :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -340,15 +340,15 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -419,7 +419,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -460,7 +460,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param base_directory_path: Required. Absolute directory for all outputs to the Edge device from this sink. :type base_directory_path: str @@ -513,18 +513,17 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inference engine. - :type data_transfer: - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. 
:type extension_configuration: str """ @@ -569,8 +568,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -602,15 +600,15 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -678,9 +676,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. 
- :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale + :type scale: ~azure.media.analyticsedge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat + :type format: ~azure.media.analyticsedge.models.MediaGraphImageFormat """ _attribute_map = { @@ -819,7 +817,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat + ~azure.media.analyticsedge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -849,7 +847,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Required. Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -885,9 +883,9 @@ class MediaGraphInstance(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -951,7 +949,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """A collection of media graph instances. :param value: A collection of media graph instances. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] + :type value: list[~azure.media.analyticsedge.models.MediaGraphInstance] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph instance collection. This is used when the collection contains too many results to return in one response. @@ -1125,11 +1123,10 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.analyticsedge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1162,7 +1159,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param instance: Required. Represents an instance of a media graph. 
- :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance + :type instance: ~azure.media.analyticsedge.models.MediaGraphInstance """ _validation = { @@ -1203,9 +1200,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -1247,7 +1244,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param hub_output_name: Required. Name of the output path to which the media graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1361,11 +1358,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. 
Possible values include: "Low", "Medium", "High". :type sensitivity: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity + ~azure.media.analyticsedge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool @@ -1406,8 +1403,7 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] + :type output_selectors: list[~azure.media.analyticsedge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1432,8 +1428,7 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". :vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or ~azure.media.analyticsedge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1468,7 +1463,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. The type of the parameter. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.analyticsedge.models.MediaGraphParameterType :param description: Description of the parameter. 
:type description: str :param default: The default value for the parameter to be used if the media graph instance does @@ -1572,9 +1567,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.analyticsedge.models.MediaGraphRtspTransport :param endpoint: Required. RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint """ _validation = { @@ -1635,7 +1630,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events before evaluating them. :type activation_evaluation_window: str @@ -1713,17 +1708,15 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. 
- :type trusted_certificates: - ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource + :type trusted_certificates: ~azure.media.analyticsedge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. - :type validation_options: - ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions + :type validation_options: ~azure.media.analyticsedge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1782,9 +1775,9 @@ class MediaGraphTopology(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -1811,7 +1804,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """A collection of media graph topologies. :param value: A collection of media graph topologies. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] + :type value: list[~azure.media.analyticsedge.models.MediaGraphTopology] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph topologies collection. This is used when the collection contains too many results to return in one response. @@ -1947,14 +1940,13 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :type description: str :param parameters: The list of parameters defined in the topology. The value for these parameters are supplied by instances of this topology. 
- :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDeclaration] :param sources: The list of source nodes in this topology. - :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] + :type sources: list[~azure.media.analyticsedge.models.MediaGraphSource] :param processors: The list of processor nodes in this topology. - :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.analyticsedge.models.MediaGraphProcessor] :param sinks: The list of sink nodes in this topology. - :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.analyticsedge.models.MediaGraphSink] """ _attribute_map = { @@ -1989,7 +1981,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param graph: Required. The definition of a media graph topology. - :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology + :type graph: ~azure.media.analyticsedge.models.MediaGraphTopology """ _validation = { @@ -2030,9 +2022,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -2071,7 +2063,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models_py3.py similarity index 93% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models_py3.py index 054dda46d4a6..7542b26cb7dc 100644 --- a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/models/_models_py3.py +++ b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/models/_models_py3.py @@ -115,7 +115,7 @@ class MediaGraphSink(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -158,7 +158,7 @@ class MediaGraphAssetSink(MediaGraphSink): :type name: str :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param asset_name_pattern: Required. A name pattern when creating new assets. The pattern must include at least one system variable. See the documentation for available variables and additional examples. @@ -260,7 +260,7 @@ class MediaGraphProcessor(msrest.serialization.Model): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] """ _validation = { @@ -306,15 +306,15 @@ class MediaGraphExtensionProcessorBase(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. 
- :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -366,15 +366,15 @@ class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBas :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -451,7 +451,7 @@ class MediaGraphEndpoint(msrest.serialization.Model): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ @@ -495,7 +495,7 @@ class MediaGraphFileSink(MediaGraphSink): :type name: str :param inputs: Required. 
An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param base_directory_path: Required. Absolute directory for all outputs to the Edge device from this sink. :type base_directory_path: str @@ -554,18 +554,17 @@ class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions :param data_transfer: Required. How media should be transferred to the inference engine. - :type data_transfer: - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransfer + :type data_transfer: ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransfer :param extension_configuration: Optional configuration to pass to the gRPC extension. 
:type extension_configuration: str """ @@ -618,8 +617,7 @@ class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): :type shared_memory_size_mi_b: str :param mode: Required. How frame data should be transmitted to the inference engine. Possible values include: "Embedded", "SharedMemory". - :type mode: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphGrpcExtensionDataTransferMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphGrpcExtensionDataTransferMode """ _validation = { @@ -654,15 +652,15 @@ class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param endpoint: Required. Endpoint to which this processor should connect. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint :param image: Required. Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.livevideoanalytics.edge.models.MediaGraphImage + :type image: ~azure.media.analyticsedge.models.MediaGraphImage :param sampling_options: Describes the sampling options to be applied when forwarding samples to the extension. - :type sampling_options: ~azure.media.livevideoanalytics.edge.models.MediaGraphSamplingOptions + :type sampling_options: ~azure.media.analyticsedge.models.MediaGraphSamplingOptions """ _validation = { @@ -739,9 +737,9 @@ class MediaGraphImage(msrest.serialization.Model): """Describes the properties of an image frame. :param scale: The scaling mode for the image. 
- :type scale: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScale + :type scale: ~azure.media.analyticsedge.models.MediaGraphImageScale :param format: Encoding settings for an image. - :type format: ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormat + :type format: ~azure.media.analyticsedge.models.MediaGraphImageFormat """ _attribute_map = { @@ -885,7 +883,7 @@ class MediaGraphImageFormatRaw(MediaGraphImageFormat): values include: "Yuv420p", "Rgb565be", "Rgb565le", "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". :type pixel_format: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphImageFormatRawPixelFormat + ~azure.media.analyticsedge.models.MediaGraphImageFormatRawPixelFormat """ _validation = { @@ -917,7 +915,7 @@ class MediaGraphImageScale(msrest.serialization.Model): :param mode: Required. Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphImageScaleMode + :type mode: str or ~azure.media.analyticsedge.models.MediaGraphImageScaleMode :param width: The desired output width of the image. :type width: str :param height: The desired output height of the image. @@ -957,9 +955,9 @@ class MediaGraphInstance(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -1029,7 +1027,7 @@ class MediaGraphInstanceCollection(msrest.serialization.Model): """A collection of media graph instances. :param value: A collection of media graph instances. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphInstance] + :type value: list[~azure.media.analyticsedge.models.MediaGraphInstance] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph instance collection. This is used when the collection contains too many results to return in one response. @@ -1212,11 +1210,10 @@ class MediaGraphInstanceProperties(msrest.serialization.Model): topology with this name should already have been set in the Edge module. :type topology_name: str :param parameters: List of one or more graph instance parameters. - :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDefinition] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDefinition] :param state: Allowed states for a graph instance. Possible values include: "Inactive", "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceState + :type state: str or ~azure.media.analyticsedge.models.MediaGraphInstanceState """ _attribute_map = { @@ -1254,7 +1251,7 @@ class MediaGraphInstanceSetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param instance: Required. Represents an instance of a media graph. 
- :type instance: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstance + :type instance: ~azure.media.analyticsedge.models.MediaGraphInstance """ _validation = { @@ -1297,9 +1294,9 @@ class MediaGraphInstanceSetRequestBody(MediaGraphInstance, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: Properties of a media graph instance. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphInstanceProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphInstanceProperties """ _validation = { @@ -1345,7 +1342,7 @@ class MediaGraphIoTHubMessageSink(MediaGraphSink): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param hub_output_name: Required. Name of the output path to which the media graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest. @@ -1468,11 +1465,11 @@ class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param sensitivity: Enumeration that specifies the sensitivity of the motion detection processor. 
Possible values include: "Low", "Medium", "High". :type sensitivity: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphMotionDetectionSensitivity + ~azure.media.analyticsedge.models.MediaGraphMotionDetectionSensitivity :param output_motion_region: Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true. :type output_motion_region: bool @@ -1519,8 +1516,7 @@ class MediaGraphNodeInput(msrest.serialization.Model): input to this node. :type node_name: str :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelector] + :type output_selectors: list[~azure.media.analyticsedge.models.MediaGraphOutputSelector] """ _attribute_map = { @@ -1548,8 +1544,7 @@ class MediaGraphOutputSelector(msrest.serialization.Model): :ivar property: The stream property to compare with. Default value: "mediaType". :vartype property: str :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or - ~azure.media.livevideoanalytics.edge.models.MediaGraphOutputSelectorOperator + :type operator: str or ~azure.media.analyticsedge.models.MediaGraphOutputSelectorOperator :param value: Value to compare against. :type value: str """ @@ -1587,7 +1582,7 @@ class MediaGraphParameterDeclaration(msrest.serialization.Model): :type name: str :param type: Required. The type of the parameter. Possible values include: "String", "SecretString", "Int", "Double", "Bool". - :type type: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphParameterType + :type type: str or ~azure.media.analyticsedge.models.MediaGraphParameterType :param description: Description of the parameter. 
:type description: str :param default: The default value for the parameter to be used if the media graph instance does @@ -1701,9 +1696,9 @@ class MediaGraphRtspSource(MediaGraphSource): :type name: str :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.livevideoanalytics.edge.models.MediaGraphRtspTransport + :type transport: str or ~azure.media.analyticsedge.models.MediaGraphRtspTransport :param endpoint: Required. RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.livevideoanalytics.edge.models.MediaGraphEndpoint + :type endpoint: ~azure.media.analyticsedge.models.MediaGraphEndpoint """ _validation = { @@ -1771,7 +1766,7 @@ class MediaGraphSignalGateProcessor(MediaGraphProcessor): :type name: str :param inputs: Required. An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.livevideoanalytics.edge.models.MediaGraphNodeInput] + :type inputs: list[~azure.media.analyticsedge.models.MediaGraphNodeInput] :param activation_evaluation_window: The period of time over which the gate gathers input events before evaluating them. :type activation_evaluation_window: str @@ -1859,17 +1854,15 @@ class MediaGraphTlsEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used. 
- :type trusted_certificates: - ~azure.media.livevideoanalytics.edge.models.MediaGraphCertificateSource + :type trusted_certificates: ~azure.media.analyticsedge.models.MediaGraphCertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. - :type validation_options: - ~azure.media.livevideoanalytics.edge.models.MediaGraphTlsValidationOptions + :type validation_options: ~azure.media.analyticsedge.models.MediaGraphTlsValidationOptions """ _validation = { @@ -1936,9 +1929,9 @@ class MediaGraphTopology(msrest.serialization.Model): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. - :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -1969,7 +1962,7 @@ class MediaGraphTopologyCollection(msrest.serialization.Model): """A collection of media graph topologies. :param value: A collection of media graph topologies. - :type value: list[~azure.media.livevideoanalytics.edge.models.MediaGraphTopology] + :type value: list[~azure.media.analyticsedge.models.MediaGraphTopology] :param continuation_token: A continuation token to use in subsequent calls to enumerate through the graph topologies collection. This is used when the collection contains too many results to return in one response. @@ -2112,14 +2105,13 @@ class MediaGraphTopologyProperties(msrest.serialization.Model): :type description: str :param parameters: The list of parameters defined in the topology. The value for these parameters are supplied by instances of this topology. 
- :type parameters: - list[~azure.media.livevideoanalytics.edge.models.MediaGraphParameterDeclaration] + :type parameters: list[~azure.media.analyticsedge.models.MediaGraphParameterDeclaration] :param sources: The list of source nodes in this topology. - :type sources: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSource] + :type sources: list[~azure.media.analyticsedge.models.MediaGraphSource] :param processors: The list of processor nodes in this topology. - :type processors: list[~azure.media.livevideoanalytics.edge.models.MediaGraphProcessor] + :type processors: list[~azure.media.analyticsedge.models.MediaGraphProcessor] :param sinks: The list of sink nodes in this topology. - :type sinks: list[~azure.media.livevideoanalytics.edge.models.MediaGraphSink] + :type sinks: list[~azure.media.analyticsedge.models.MediaGraphSink] """ _attribute_map = { @@ -2160,7 +2152,7 @@ class MediaGraphTopologySetRequest(MethodRequest): :ivar api_version: api version. Default value: "2.0". :vartype api_version: str :param graph: Required. The definition of a media graph topology. - :type graph: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopology + :type graph: ~azure.media.analyticsedge.models.MediaGraphTopology """ _validation = { @@ -2203,9 +2195,9 @@ class MediaGraphTopologySetRequestBody(MediaGraphTopology, MethodRequest): :type name: str :param system_data: The system data for a resource. This is used by both topologies and instances. - :type system_data: ~azure.media.livevideoanalytics.edge.models.MediaGraphSystemData + :type system_data: ~azure.media.analyticsedge.models.MediaGraphSystemData :param properties: A description of the properties of a media graph topology. 
- :type properties: ~azure.media.livevideoanalytics.edge.models.MediaGraphTopologyProperties + :type properties: ~azure.media.analyticsedge.models.MediaGraphTopologyProperties """ _validation = { @@ -2248,7 +2240,7 @@ class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.livevideoanalytics.edge.models.MediaGraphCredentials + :type credentials: ~azure.media.analyticsedge.models.MediaGraphCredentials :param url: Required. Url for the endpoint. :type url: str """ diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/py.typed similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_generated/py.typed rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_generated/py.typed diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_version.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/azure/media/analytics/edge/_version.py rename to sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/_version.py diff --git a/sdk/media/azure-media-analyticsedge/dev_requirements.txt b/sdk/media/azure-media-analytics-edge/dev_requirements.txt similarity index 100% rename from sdk/media/azure-media-analyticsedge/dev_requirements.txt rename to sdk/media/azure-media-analytics-edge/dev_requirements.txt diff --git a/sdk/media/azure-media-analyticsedge/docs/DevTips.md b/sdk/media/azure-media-analytics-edge/docs/DevTips.md similarity index 100% rename from sdk/media/azure-media-analyticsedge/docs/DevTips.md rename to sdk/media/azure-media-analytics-edge/docs/DevTips.md diff --git 
a/sdk/media/azure-media-analyticsedge/samples/sample_lva.py b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py similarity index 99% rename from sdk/media/azure-media-analyticsedge/samples/sample_lva.py rename to sdk/media/azure-media-analytics-edge/samples/sample_lva.py index 139a349bbcac..ea6058734888 100644 --- a/sdk/media/azure-media-analyticsedge/samples/sample_lva.py +++ b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py @@ -2,7 +2,7 @@ import json import os #from azure.media.livevideoanalytics.edge._generated.models import * -from azure.media.analytics.edge import * +from azure.media.analyticsedge import * from azure.iot.hub import IoTHubRegistryManager from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time diff --git a/sdk/media/azure-media-analyticsedge/sdk_packaging.toml b/sdk/media/azure-media-analytics-edge/sdk_packaging.toml similarity index 100% rename from sdk/media/azure-media-analyticsedge/sdk_packaging.toml rename to sdk/media/azure-media-analytics-edge/sdk_packaging.toml diff --git a/sdk/media/azure-media-analyticsedge/setup.cfg b/sdk/media/azure-media-analytics-edge/setup.cfg similarity index 100% rename from sdk/media/azure-media-analyticsedge/setup.cfg rename to sdk/media/azure-media-analytics-edge/setup.cfg diff --git a/sdk/media/azure-media-analyticsedge/setup.py b/sdk/media/azure-media-analytics-edge/setup.py similarity index 96% rename from sdk/media/azure-media-analyticsedge/setup.py rename to sdk/media/azure-media-analytics-edge/setup.py index a333424a5e35..ac63e10dfe9e 100644 --- a/sdk/media/azure-media-analyticsedge/setup.py +++ b/sdk/media/azure-media-analytics-edge/setup.py @@ -14,12 +14,11 @@ # Change the PACKAGE_NAME only to change folder and different name PACKAGE_NAME = "azure-media-analytics-edge" +NAMESPACE_NAME = "azure.media.analyticsedge" PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" # a-b-c => a/b/c -package_folder_path = 
PACKAGE_NAME.replace('-', '/') -# a-b-c => a.b.c -namespace_name = PACKAGE_NAME.replace('-', '.') +package_folder_path = NAMESPACE_NAME.replace('.', '/') # azure v0.x is not compatible with this package # azure v0.x used to have a __version__ attribute (newer versions don't) diff --git a/sdk/media/azure-media-analyticsedge/swagger/autorest.md b/sdk/media/azure-media-analytics-edge/swagger/autorest.md similarity index 82% rename from sdk/media/azure-media-analyticsedge/swagger/autorest.md rename to sdk/media/azure-media-analytics-edge/swagger/autorest.md index 23c7e8518044..919859203e35 100644 --- a/sdk/media/azure-media-analyticsedge/swagger/autorest.md +++ b/sdk/media/azure-media-analytics-edge/swagger/autorest.md @@ -11,8 +11,8 @@ autorest --v3 --python ```yaml require: https://github.com/Azure/azure-rest-api-specs/blob/7b34c62199a8d84f7252dcb8b08c1b593ae65124/specification/mediaservices/data-plane/readme.md -output-folder: ../azure/media/livevideoanalytics/edge/_generated -namespace: azure.media.livevideoanalytics.edge +output-folder: ../azure/media/analyticsedge/_generated +namespace: azure.media.analyticsedge no-namespace-folders: true license-header: MICROSOFT_MIT_NO_VERSION enable-xml: false diff --git a/sdk/media/azure-media-analyticsedge/tests/conftest.py b/sdk/media/azure-media-analytics-edge/tests/conftest.py similarity index 100% rename from sdk/media/azure-media-analyticsedge/tests/conftest.py rename to sdk/media/azure-media-analytics-edge/tests/conftest.py diff --git a/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py new file mode 100644 index 000000000000..d46839833404 --- /dev/null +++ b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py @@ -0,0 +1,23 @@ +import pytest +from azure.media.analyticsedge import * + +class TestGraphBuildSerialize(): + def test_build_graph_serialize(): + graph_topology_name = "graphTopology1" + 
graph_properties = MediaGraphTopologyProperties() + graph_properties.description = "Continuous video recording to an Azure Media Services Asset" + user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") + password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") + url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") + + source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) + node = MediaGraphNodeInput(node_name="rtspSource") + sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/") + graph_properties.parameters = [user_name_param, password_param, url_param] + graph_properties.sources = [source] + graph_properties.sinks = [sink] + graph = MediaGraphTopology(name=graph_topology_name,properties=graph_properties) + + set_graph_method = MediaGraphTopologySetRequest(graph=graph) + set_graph_method_serialize = set_graph_method.serialize() + assert set_graph_method_serialize['name'] == graph_topology_name \ No newline at end of file diff --git a/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py b/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py deleted file mode 100644 index 69e3be50dac4..000000000000 --- a/sdk/media/azure-media-analyticsedge/azure/media/analytics/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-analyticsedge/tests/test_app_config.py b/sdk/media/azure-media-analyticsedge/tests/test_app_config.py deleted 
file mode 100644 index 57f0ccfa146f..000000000000 --- a/sdk/media/azure-media-analyticsedge/tests/test_app_config.py +++ /dev/null @@ -1,5 +0,0 @@ -import pytest - -class TestAppConfig(): - def test_something(self): - assert 1 \ No newline at end of file From a955d9f82a966d382032c9d3ca748eb66387dfcf Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:32:24 -0800 Subject: [PATCH 58/64] updating test with missing parameter and sampels with better placeholder strings --- sdk/media/azure-media-analytics-edge/samples/sample_lva.py | 6 +++--- .../tests/test_build_graph_serialize.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/media/azure-media-analytics-edge/samples/sample_lva.py b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py index ea6058734888..634266833dbf 100644 --- a/sdk/media/azure-media-analytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py @@ -7,9 +7,9 @@ from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time -device_id = "enter-your-device-name" -module_d = "enter-your-module-name" -connection_string = "enter-your-connection-string" +device_id = "device-name" +module_d = "module-name" +connection_string = "connection-string" graph_instance_name = "graphInstance1" graph_topology_name = "graphTopology1" graph_url = "rtsp://sample-url-from-camera" diff --git a/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py index d46839833404..fc8fe7185dc8 100644 --- a/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py +++ b/sdk/media/azure-media-analytics-edge/tests/test_build_graph_serialize.py @@ -2,7 +2,7 @@ from azure.media.analyticsedge import * class TestGraphBuildSerialize(): - def test_build_graph_serialize(): + def test_build_graph_serialize(self): graph_topology_name = "graphTopology1" graph_properties 
= MediaGraphTopologyProperties() graph_properties.description = "Continuous video recording to an Azure Media Services Asset" From a14b4ce5a9b17094a6c2f81272a6dce4c59bc7be Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:54:54 -0800 Subject: [PATCH 59/64] updating ci file --- sdk/media/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 3a33ab7801fb..a709b503e4ed 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -35,5 +35,6 @@ extends: - name: azure_media_nspkg safeName: azuremedianspkg - name: azure_media_analyticsedge + - name: azure_media_analytics_edge safeName: azuremediaanalyticsedge From 0cbefcf8330fd27bb5b1e190b32e90f66de6e88a Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 12:24:07 -0800 Subject: [PATCH 60/64] merged nspkg --- sdk/media/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index a709b503e4ed..0c7d86d0ada9 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,11 +30,8 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia - - name: azure_media_lva_edge - safeName: azuremedialvaedge - name: azure_media_nspkg safeName: azuremedianspkg - - name: azure_media_analyticsedge - name: azure_media_analytics_edge safeName: azuremediaanalyticsedge From 0ae914e037196652b12465c5a38d5783fbbd7dbc Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 15:39:47 -0800 Subject: [PATCH 61/64] addressing PR comments --- .../azure-media-analytics-edge/MANIFEST.in | 1 + .../azure-media-analytics-edge/README.md | 1 + .../azure/media/analyticsedge/__init__.py | 7 +++++ .../samples/sample_lva.py | 3 +-- sdk/media/azure-media-analytics-edge/setup.py | 26 +++++++------------ 5 files changed, 20 insertions(+), 18 deletions(-) diff --git a/sdk/media/azure-media-analytics-edge/MANIFEST.in b/sdk/media/azure-media-analytics-edge/MANIFEST.in index 7ebdd947f8ff..355ca1aa3183 100644 --- 
a/sdk/media/azure-media-analytics-edge/MANIFEST.in +++ b/sdk/media/azure-media-analytics-edge/MANIFEST.in @@ -1,4 +1,5 @@ recursive-include tests *.py include *.md include azure/__init__.py +include azure/media/__init__.py recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-analytics-edge/README.md b/sdk/media/azure-media-analytics-edge/README.md index 288e7704f76b..001d8d328194 100644 --- a/sdk/media/azure-media-analytics-edge/README.md +++ b/sdk/media/azure-media-analytics-edge/README.md @@ -22,6 +22,7 @@ pip install azure-media-analytics-edge * Python 2.7, or 3.5 or later is required to use this package. * You need an active [Azure subscription][azure_sub], and a [IoT device connection string][iot_device_connection_string] to use this package. +* To interact with Azure IoT Hub you will need to run `pip install azure-iot-hub` * You will need to use the version of the SDK that corresponds to the version of the LVA Edge module you are using. | SDK | LVA Edge Module | diff --git a/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py index 7f07d48526e2..f0e634c72a00 100644 --- a/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py +++ b/sdk/media/azure-media-analytics-edge/azure/media/analyticsedge/__init__.py @@ -1,6 +1,13 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------- from ._generated.models import * from ._generated import models +from ._version import VERSION +__version__ = VERSION __all__ = models.__all__ def _OverrideTopologySetRequestSerialize(self): diff --git a/sdk/media/azure-media-analytics-edge/samples/sample_lva.py b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py index 634266833dbf..b6cd17fe7eac 100644 --- a/sdk/media/azure-media-analytics-edge/samples/sample_lva.py +++ b/sdk/media/azure-media-analytics-edge/samples/sample_lva.py @@ -1,9 +1,8 @@ import json import os -#from azure.media.livevideoanalytics.edge._generated.models import * from azure.media.analyticsedge import * -from azure.iot.hub import IoTHubRegistryManager +from azure.iot.hub import IoTHubRegistryManager #run pip install azure-iot-hub to get this package from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult from datetime import time diff --git a/sdk/media/azure-media-analytics-edge/setup.py b/sdk/media/azure-media-analytics-edge/setup.py index ac63e10dfe9e..9be5f5ab4a4c 100644 --- a/sdk/media/azure-media-analytics-edge/setup.py +++ b/sdk/media/azure-media-analytics-edge/setup.py @@ -48,19 +48,6 @@ with open('CHANGELOG.md', encoding='utf-8') as f: changelog = f.read() -exclude_packages = [ - 'tests', - 'tests.*', - 'samples', - # Exclude packages that will be covered by PEP420 or nspkg - 'azure', - ] -if sys.version_info < (3, 5, 3): - exclude_packages.extend([ - '*.aio', - '*.aio.*' - ]) - setup( name=PACKAGE_NAME, version=version, @@ -70,7 +57,7 @@ license='MIT License', author='Microsoft Corporation', author_email='azpysdkhelp@microsoft.com', - url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration', + url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-nedua-analytics-edge', classifiers=[ "Development Status :: 4 - Beta", 'Programming Language :: Python', 
@@ -85,10 +72,17 @@ 'License :: OSI Approved :: MIT License', ], zip_safe=False, - packages=find_packages(exclude=exclude_packages), + packages=find_packages( + exclude=[ + "samples", + "tests", + # Exclude packages that will be covered by PEP420 or nspkg + "azure", + "azure.media", + ] + ), install_requires=[ "msrest>=0.5.0", - "azure-core<2.0.0,>=1.2.2", ], extras_require={ ":python_version<'3.0'": ['azure-nspkg'], From 0a9c549386812e75cb4006fc7ce55e75b3aa7934 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 15:49:05 -0800 Subject: [PATCH 62/64] fixing typo --- sdk/media/azure-media-analytics-edge/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-analytics-edge/setup.py b/sdk/media/azure-media-analytics-edge/setup.py index 9be5f5ab4a4c..16972a6f41e5 100644 --- a/sdk/media/azure-media-analytics-edge/setup.py +++ b/sdk/media/azure-media-analytics-edge/setup.py @@ -57,7 +57,7 @@ license='MIT License', author='Microsoft Corporation', author_email='azpysdkhelp@microsoft.com', - url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-nedua-analytics-edge', + url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-media-analytics-edge', classifiers=[ "Development Status :: 4 - Beta", 'Programming Language :: Python', From 439e9dc170484abce8f1d267976958120edb51b6 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 15:50:05 -0800 Subject: [PATCH 63/64] adding media to nspkg ref --- sdk/media/azure-media-analytics-edge/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-analytics-edge/setup.py b/sdk/media/azure-media-analytics-edge/setup.py index 16972a6f41e5..c03b0d627a5f 100644 --- a/sdk/media/azure-media-analytics-edge/setup.py +++ b/sdk/media/azure-media-analytics-edge/setup.py @@ -85,7 +85,7 @@ "msrest>=0.5.0", ], extras_require={ - ":python_version<'3.0'": ['azure-nspkg'], + ":python_version<'3.0'": 
['azure-media-nspkg'], ":python_version<'3.4'": ['enum34>=1.0.4'], ":python_version<'3.5'": ['typing'], } From f8d218efb7c1e6c682128842c9175db693953780 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 16:19:59 -0800 Subject: [PATCH 64/64] adding media-mspkg to shared_requirements --- shared_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/shared_requirements.txt b/shared_requirements.txt index 9ba3fb1f7d99..f4c3494909ce 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -86,6 +86,7 @@ azure-mgmt-trafficmanager~=0.50.0 azure-mgmt-web~=0.35.0 azure-nspkg azure-keyvault-nspkg +azure-media-nspkg azure-schemaregistry==1.0.0b1 azure-search-nspkg azure-security-nspkg