From 599896fef37fd410fcc32e6939992a23d785dc98 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sat, 18 May 2019 16:20:26 -0700 Subject: [PATCH 01/54] remove async_ops --- .../azure/eventhub/async_ops/__init__.py | 325 ----------------- .../eventhub/async_ops/receiver_async.py | 315 ----------------- .../azure/eventhub/async_ops/sender_async.py | 330 ------------------ 3 files changed, 970 deletions(-) delete mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/__init__.py delete mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/receiver_async.py delete mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/sender_async.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/__init__.py deleted file mode 100644 index 784746d04bb8..000000000000 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/__init__.py +++ /dev/null @@ -1,325 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------------------------- - -import logging -import asyncio -import time -import datetime -from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus - -from uamqp import authentication, constants, types, errors -from uamqp import ( - Message, - ConnectionAsync, - AMQPClientAsync, - SendClientAsync, - ReceiveClientAsync) - -from azure.eventhub.common import parse_sas_token -from azure.eventhub import ( - Sender, - Receiver, - EventHubClient, - EventData, - EventHubError) - -from .sender_async import AsyncSender -from .receiver_async import AsyncReceiver - - -log = logging.getLogger(__name__) - - -class EventHubClientAsync(EventHubClient): - """ - The EventHubClient class defines a high level interface for asynchronously - sending events to and receiving events from the Azure Event Hubs service. - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async] - :end-before: [END create_eventhub_client_async] - :language: python - :dedent: 4 - :caption: Create a new instance of the Event Hub client async. - - """ - - def _create_auth(self, username=None, password=None): - """ - Create an ~uamqp.authentication.cbs_auth_async.SASTokenAuthAsync instance to authenticate - the session. - - :param username: The name of the shared access policy. - :type username: str - :param password: The shared access key. 
- :type password: str - """ - if self.sas_token: - token = self.sas_token() if callable(self.sas_token) else self.sas_token - try: - expiry = int(parse_sas_token(token)['se']) - except (KeyError, TypeError, IndexError): - raise ValueError("Supplied SAS token has no valid expiry value.") - return authentication.SASTokenAsync( - self.auth_uri, self.auth_uri, token, - expires_at=expiry, - timeout=self.auth_timeout, - http_proxy=self.http_proxy) - - username = username or self._auth_config['username'] - password = password or self._auth_config['password'] - if "@sas.root" in username: - return authentication.SASLPlain( - self.address.hostname, username, password, http_proxy=self.http_proxy) - return authentication.SASTokenAsync.from_shared_access_key( - self.auth_uri, username, password, timeout=self.auth_timeout, http_proxy=self.http_proxy) - - async def _close_clients_async(self): - """ - Close all open AsyncSender/AsyncReceiver clients. - """ - await asyncio.gather(*[c.close_async() for c in self.clients]) - - async def _wait_for_client(self, client): - try: - while client.get_handler_state().value == 2: - await client._handler._connection.work_async() # pylint: disable=protected-access - except Exception as exp: # pylint: disable=broad-except - await client.close_async(exception=exp) - - async def _start_client_async(self, client): - try: - if not client.running: - await client.open_async() - except Exception as exp: # pylint: disable=broad-except - log.info("Encountered error while starting handler: %r", exp) - await client.close_async(exception=exp) - log.info("Finished closing failed handler") - - async def _handle_redirect(self, redirects): - if len(redirects) != len(self.clients): - not_redirected = [c for c in self.clients if not c.redirected] - _, timeout = await asyncio.wait([self._wait_for_client(c) for c in not_redirected], timeout=5) - if timeout: - raise EventHubError("Some clients are attempting to redirect the connection.") - redirects = [c.redirected 
for c in self.clients if c.redirected] - if not all(r.hostname == redirects[0].hostname for r in redirects): - raise EventHubError("Multiple clients attempting to redirect to different hosts.") - self._process_redirect_uri(redirects[0]) - await asyncio.gather(*[c.open_async() for c in self.clients]) - - async def run_async(self): - """ - Run the EventHubClient asynchronously. - Opens the connection and starts running all AsyncSender/AsyncReceiver clients. - Returns a list of the start up results. For a succcesful client start the - result will be `None`, otherwise the exception raised. - If all clients failed to start, then run will fail, shut down the connection - and raise an exception. - If at least one client starts up successfully the run command will succeed. - - :rtype: list[~azure.eventhub.common.EventHubError] - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_run_async] - :end-before: [END eventhub_client_run_async] - :language: python - :dedent: 4 - :caption: Run the EventHubClient asynchronously. - - """ - log.info("%r: Starting %r clients", self.container_id, len(self.clients)) - tasks = [self._start_client_async(c) for c in self.clients] - try: - await asyncio.gather(*tasks) - redirects = [c.redirected for c in self.clients if c.redirected] - failed = [c.error for c in self.clients if c.error] - if failed and len(failed) == len(self.clients): - log.warning("%r: All clients failed to start.", self.container_id) - raise failed[0] - if failed: - log.warning("%r: %r clients failed to start.", self.container_id, len(failed)) - elif redirects: - await self._handle_redirect(redirects) - except EventHubError: - await self.stop_async() - raise - except Exception as exp: - await self.stop_async() - raise EventHubError(str(exp)) - return failed - - async def stop_async(self): - """ - Stop the EventHubClient and all its Sender/Receiver clients. - - Example: - .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_stop] - :end-before: [END eventhub_client_async_stop] - :language: python - :dedent: 4 - :caption: Stop the EventHubClient and all its Sender/Receiver clients. - - """ - log.info("%r: Stopping %r clients", self.container_id, len(self.clients)) - self.stopped = True - await self._close_clients_async() - - async def get_eventhub_info_async(self): - """ - Get details on the specified EventHub async. - - :rtype: dict - """ - alt_creds = { - "username": self._auth_config.get("iot_username"), - "password":self._auth_config.get("iot_password")} - try: - mgmt_auth = self._create_auth(**alt_creds) - mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug) - await mgmt_client.open_async() - mgmt_msg = Message(application_properties={'name': self.eh_name}) - response = await mgmt_client.mgmt_request_async( - mgmt_msg, - constants.READ_OPERATION, - op_type=b'com.microsoft:eventhub', - status_code_field=b'status-code', - description_fields=b'status-description') - eh_info = response.get_data() - output = {} - if eh_info: - output['name'] = eh_info[b'name'].decode('utf-8') - output['type'] = eh_info[b'type'].decode('utf-8') - output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) - output['partition_count'] = eh_info[b'partition_count'] - output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] - return output - finally: - await mgmt_client.close_async() - - def add_async_receiver( - self, consumer_group, partition, offset=None, prefetch=300, - operation=None, keep_alive=30, auto_reconnect=True, loop=None): - """ - Add an async receiver to the client for a particular consumer group and partition. - - :param consumer_group: The name of the consumer group. - :type consumer_group: str - :param partition: The ID of the partition. 
- :type partition: str - :param offset: The offset from which to start receiving. - :type offset: ~azure.eventhub.common.Offset - :param prefetch: The message prefetch count of the receiver. Default is 300. - :type prefetch: int - :operation: An optional operation to be appended to the hostname in the source URL. - The value must start with `/` character. - :type operation: str - :rtype: ~azure.eventhub.async_ops.receiver_async.ReceiverAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_receiver] - :end-before: [END create_eventhub_client_async_receiver] - :language: python - :dedent: 4 - :caption: Add an async receiver to the client for a particular consumer group and partition. - - """ - path = self.address.path + operation if operation else self.address.path - source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, path, consumer_group, partition) - handler = AsyncReceiver( - self, source_url, offset=offset, prefetch=prefetch, - keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop) - self.clients.append(handler) - return handler - - def add_async_epoch_receiver( - self, consumer_group, partition, epoch, prefetch=300, - operation=None, keep_alive=30, auto_reconnect=True, loop=None): - """ - Add an async receiver to the client with an epoch value. Only a single epoch receiver - can connect to a partition at any given time - additional epoch receivers must have - a higher epoch value or they will be rejected. If a 2nd epoch receiver has - connected, the first will be closed. - - :param consumer_group: The name of the consumer group. - :type consumer_group: str - :param partition: The ID of the partition. - :type partition: str - :param epoch: The epoch value for the receiver. - :type epoch: int - :param prefetch: The message prefetch count of the receiver. Default is 300. 
- :type prefetch: int - :operation: An optional operation to be appended to the hostname in the source URL. - The value must start with `/` character. - :type operation: str - :rtype: ~azure.eventhub.async_ops.receiver_async.ReceiverAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_epoch_receiver] - :end-before: [END create_eventhub_client_async_epoch_receiver] - :language: python - :dedent: 4 - :caption: Add an async receiver to the client with an epoch value. - - """ - path = self.address.path + operation if operation else self.address.path - source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, path, consumer_group, partition) - handler = AsyncReceiver( - self, source_url, prefetch=prefetch, epoch=epoch, - keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop) - self.clients.append(handler) - return handler - - def add_async_sender( - self, partition=None, operation=None, send_timeout=60, - keep_alive=30, auto_reconnect=True, loop=None): - """ - Add an async sender to the client to send ~azure.eventhub.common.EventData object - to an EventHub. - - :param partition: Optionally specify a particular partition to send to. - If omitted, the events will be distributed to available partitions via - round-robin. - :type partition: str - :operation: An optional operation to be appended to the hostname in the target URL. - The value must start with `/` character. - :type operation: str - :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is - queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: int - :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during - periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not - be pinged. 
- :type keep_alive: int - :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. - Default value is `True`. - :type auto_reconnect: bool - :rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_sender] - :end-before: [END create_eventhub_client_async_sender] - :language: python - :dedent: 4 - :caption: Add an async sender to the client to - send ~azure.eventhub.common.EventData object to an EventHub. - - """ - target = "amqps://{}{}".format(self.address.hostname, self.address.path) - if operation: - target = target + operation - handler = AsyncSender( - self, target, partition=partition, send_timeout=send_timeout, keep_alive=keep_alive, - auto_reconnect=auto_reconnect, loop=loop) - self.clients.append(handler) - return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/receiver_async.py deleted file mode 100644 index 3dc17b57a689..000000000000 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/receiver_async.py +++ /dev/null @@ -1,315 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------------------------- - -import asyncio -import uuid -import logging - -from uamqp import errors, types -from uamqp import ReceiveClientAsync, Source - -from azure.eventhub import EventHubError, EventData -from azure.eventhub.receiver import Receiver -from azure.eventhub.common import _error_handler - -log = logging.getLogger(__name__) - - -class AsyncReceiver(Receiver): - """ - Implements the async API of a Receiver. - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_receiver_instance] - :end-before: [END create_eventhub_client_async_receiver_instance] - :language: python - :dedent: 4 - :caption: Create a new instance of the Async Receiver. - - """ - - def __init__( # pylint: disable=super-init-not-called - self, client, source, offset=None, prefetch=300, epoch=None, - keep_alive=None, auto_reconnect=True, loop=None): - """ - Instantiate an async receiver. - - :param client: The parent EventHubClientAsync. - :type client: ~azure.eventhub.async_ops.EventHubClientAsync - :param source: The source EventHub from which to receive events. - :type source: ~uamqp.address.Source - :param prefetch: The number of events to prefetch from the service - for processing. Default is 300. - :type prefetch: int - :param epoch: An optional epoch value. - :type epoch: int - :param loop: An event loop. 
- """ - self.loop = loop or asyncio.get_event_loop() - self.running = False - self.client = client - self.source = source - self.offset = offset - self.prefetch = prefetch - self.epoch = epoch - self.keep_alive = keep_alive - self.auto_reconnect = auto_reconnect - self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) - self.reconnect_backoff = 1 - self.redirected = None - self.error = None - self.properties = None - partition = self.source.split('/')[-1] - self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) - source = Source(self.source) - if self.offset is not None: - source.set_filter(self.offset.selector()) - if epoch: - self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} - self._handler = ReceiveClientAsync( - source, - auth=self.client.get_auth(), - debug=self.client.debug, - prefetch=self.prefetch, - link_properties=self.properties, - timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(), - loop=self.loop) - - async def open_async(self): - """ - Open the Receiver using the supplied conneciton. - If the handler has previously been redirected, the redirect - context will be used to create a new handler before opening it. - - :param connection: The underlying client shared connection. - :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_receiver_open] - :end-before: [END eventhub_client_async_receiver_open] - :language: python - :dedent: 4 - :caption: Open the Receiver using the supplied conneciton. 
- - """ - # pylint: disable=protected-access - self.running = True - if self.redirected: - self.source = self.redirected.address - source = Source(self.source) - if self.offset is not None: - source.set_filter(self.offset.selector()) - alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password":self.client._auth_config.get("iot_password")} - self._handler = ReceiveClientAsync( - source, - auth=self.client.get_auth(**alt_creds), - debug=self.client.debug, - prefetch=self.prefetch, - link_properties=self.properties, - timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(), - loop=self.loop) - await self._handler.open_async() - while not await self._handler.client_ready_async(): - await asyncio.sleep(0.05) - - async def _reconnect_async(self): # pylint: disable=too-many-statements - # pylint: disable=protected-access - alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password":self.client._auth_config.get("iot_password")} - await self._handler.close_async() - source = Source(self.source) - if self.offset is not None: - source.set_filter(self.offset.selector()) - self._handler = ReceiveClientAsync( - source, - auth=self.client.get_auth(**alt_creds), - debug=self.client.debug, - prefetch=self.prefetch, - link_properties=self.properties, - timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(), - loop=self.loop) - try: - await self._handler.open_async() - while not await self._handler.client_ready_async(): - await asyncio.sleep(0.05) - return True - except errors.TokenExpired as shutdown: - log.info("AsyncReceiver disconnected due to token expiry. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") - return False - log.info("AsyncReceiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") - return False - log.info("AsyncReceiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("AsyncReceiver couldn't authenticate. Attempting reconnect.") - return False - log.info("AsyncReceiver connection error (%r). Shutting down.", shutdown) - error = EventHubError(str(shutdown)) - await self.close_async(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Receiver reconnect failed: {}".format(e)) - await self.close_async(exception=error) - raise error - - async def reconnect_async(self): - """If the Receiver was disconnected from the service with - a retryable error - attempt to reconnect.""" - while not await self._reconnect_async(): - await asyncio.sleep(self.reconnect_backoff) - - async def has_started(self): - """ - Whether the handler has completed all start up processes such as - establishing the connection, session, link and authentication, and - is not ready to process messages. 
- **This function is now deprecated and will be removed in v2.0+.** - - :rtype: bool - """ - # pylint: disable=protected-access - timeout = False - auth_in_progress = False - if self._handler._connection.cbs: - timeout, auth_in_progress = await self._handler._auth.handle_token_async() - if timeout: - raise EventHubError("Authorization timeout.") - if auth_in_progress: - return False - if not await self._handler._client_ready_async(): - return False - return True - - async def close_async(self, exception=None): - """ - Close down the handler. If the handler has already closed, - this will be a no op. An optional exception can be passed in to - indicate that the handler was shutdown due to error. - - :param exception: An optional exception if the handler is closing - due to an error. - :type exception: Exception - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_receiver_close] - :end-before: [END eventhub_client_async_receiver_close] - :language: python - :dedent: 4 - :caption: Close down the handler. - - """ - self.running = False - if self.error: - return - if isinstance(exception, errors.LinkRedirect): - self.redirected = exception - elif isinstance(exception, EventHubError): - self.error = exception - elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = EventHubError(str(exception), exception) - elif exception: - self.error = EventHubError(str(exception)) - else: - self.error = EventHubError("This receive handler is now closed.") - await self._handler.close_async() - - async def receive(self, max_batch_size=None, timeout=None): - """ - Receive events asynchronously from the EventHub. - - :param max_batch_size: Receive a batch of events. Batch size will - be up to the maximum specified, but will return as soon as service - returns no new events. If combined with a timeout and no events are - retrieve before the time, the result will be empty. 
If no batch - size is supplied, the prefetch size will be the maximum. - :type max_batch_size: int - :rtype: list[~azure.eventhub.common.EventData] - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_receive] - :end-before: [END eventhub_client_async_receive] - :language: python - :dedent: 4 - :caption: Sends an event data and asynchronously waits - until acknowledgement is received or operation times out. - - """ - if self.error: - raise self.error - if not self.running: - raise ValueError("Unable to receive until client has been started.") - data_batch = [] - try: - timeout_ms = 1000 * timeout if timeout else 0 - message_batch = await self._handler.receive_message_batch_async( - max_batch_size=max_batch_size, - timeout=timeout_ms) - for message in message_batch: - event_data = EventData(message=message) - self.offset = event_data.offset - data_batch.append(event_data) - return data_batch - except (errors.TokenExpired, errors.AuthenticationException): - log.info("AsyncReceiver disconnected due to token error. Attempting reconnect.") - await self.reconnect_async() - return data_batch - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") - await self.reconnect_async() - return data_batch - log.info("AsyncReceiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") - await self.reconnect_async() - return data_batch - log.info("AsyncReceiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r). 
Shutting down.", e) - error = EventHubError("Receive failed: {}".format(e)) - await self.close_async(exception=error) - raise error diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/sender_async.py deleted file mode 100644 index e2fb1cbb7022..000000000000 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/async_ops/sender_async.py +++ /dev/null @@ -1,330 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - -import uuid -import asyncio -import logging - -from uamqp import constants, errors -from uamqp import SendClientAsync - -from azure.eventhub import EventHubError -from azure.eventhub.sender import Sender -from azure.eventhub.common import _error_handler - -log = logging.getLogger(__name__) - - -class AsyncSender(Sender): - """ - Implements the async API of a Sender. - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_sender_instance] - :end-before: [END create_eventhub_client_async_sender_instance] - :language: python - :dedent: 4 - :caption: Create a new instance of the Async Sender. - - """ - - def __init__( # pylint: disable=super-init-not-called - self, client, target, partition=None, send_timeout=60, - keep_alive=None, auto_reconnect=True, loop=None): - """ - Instantiate an EventHub event SenderAsync handler. - - :param client: The parent EventHubClientAsync. - :type client: ~azure.eventhub.async_ops.EventHubClientAsync - :param target: The URI of the EventHub to send to. - :type target: str - :param partition: The specific partition ID to send to. 
Default is `None`, in which case the service - will assign to all partitions using round-robin. - :type partition: str - :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is - queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: int - :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during - periods of inactivity. The default value is `None`, i.e. no keep alive pings. - :type keep_alive: int - :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. - Default value is `True`. - :type auto_reconnect: bool - :param loop: An event loop. If not specified the default event loop will be used. - """ - self.loop = loop or asyncio.get_event_loop() - self.running = False - self.client = client - self.target = target - self.partition = partition - self.keep_alive = keep_alive - self.auto_reconnect = auto_reconnect - self.timeout = send_timeout - self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) - self.reconnect_backoff = 1 - self.name = "EHSender-{}".format(uuid.uuid4()) - self.redirected = None - self.error = None - if partition: - self.target += "/Partitions/" + partition - self.name += "-partition{}".format(partition) - self._handler = SendClientAsync( - self.target, - auth=self.client.get_auth(), - debug=self.client.debug, - msg_timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(), - loop=self.loop) - self._outcome = None - self._condition = None - - async def open_async(self): - """ - Open the Sender using the supplied conneciton. - If the handler has previously been redirected, the redirect - context will be used to create a new handler before opening it. - - :param connection: The underlying client shared connection. 
- :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_sender_open] - :end-before: [END eventhub_client_async_sender_open] - :language: python - :dedent: 4 - :caption: Open the Sender using the supplied conneciton. - - """ - self.running = True - if self.redirected: - self.target = self.redirected.address - self._handler = SendClientAsync( - self.target, - auth=self.client.get_auth(), - debug=self.client.debug, - msg_timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(), - loop=self.loop) - await self._handler.open_async() - while not await self._handler.client_ready_async(): - await asyncio.sleep(0.05) - - async def _reconnect_async(self): - await self._handler.close_async() - unsent_events = self._handler.pending_messages - self._handler = SendClientAsync( - self.target, - auth=self.client.get_auth(), - debug=self.client.debug, - msg_timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(), - loop=self.loop) - try: - await self._handler.open_async() - self._handler.queue_message(*unsent_events) - await self._handler.wait_async() - return True - except errors.TokenExpired as shutdown: - log.info("AsyncSender disconnected due to token expiry. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - return False - log.info("AsyncSender reconnect failed. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - return False - log.info("AsyncSender reconnect failed. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("AsyncSender couldn't authenticate. Attempting reconnect.") - return False - log.info("AsyncSender connection error (%r). Shutting down.", shutdown) - error = EventHubError(str(shutdown)) - await self.close_async(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Sender reconnect failed: {}".format(e)) - await self.close_async(exception=error) - raise error - - async def reconnect_async(self): - """If the Receiver was disconnected from the service with - a retryable error - attempt to reconnect.""" - while not await self._reconnect_async(): - await asyncio.sleep(self.reconnect_backoff) - - async def has_started(self): - """ - Whether the handler has completed all start up processes such as - establishing the connection, session, link and authentication, and - is not ready to process messages. 
- **This function is now deprecated and will be removed in v2.0+.** - - :rtype: bool - """ - # pylint: disable=protected-access - timeout = False - auth_in_progress = False - if self._handler._connection.cbs: - timeout, auth_in_progress = await self._handler._auth.handle_token_async() - if timeout: - raise EventHubError("Authorization timeout.") - if auth_in_progress: - return False - if not await self._handler._client_ready_async(): - return False - return True - - async def close_async(self, exception=None): - """ - Close down the handler. If the handler has already closed, - this will be a no op. An optional exception can be passed in to - indicate that the handler was shutdown due to error. - - :param exception: An optional exception if the handler is closing - due to an error. - :type exception: Exception - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_sender_close] - :end-before: [END eventhub_client_async_sender_close] - :language: python - :dedent: 4 - :caption: Close down the handler. - - """ - self.running = False - if self.error: - return - if isinstance(exception, errors.LinkRedirect): - self.redirected = exception - elif isinstance(exception, EventHubError): - self.error = exception - elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = EventHubError(str(exception), exception) - elif exception: - self.error = EventHubError(str(exception)) - else: - self.error = EventHubError("This send handler is now closed.") - await self._handler.close_async() - - async def send(self, event_data): - """ - Sends an event data and asynchronously waits until - acknowledgement is received or operation times out. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData - :raises: ~azure.eventhub.common.EventHubError if the message fails to - send. - - Example: - .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_send] - :end-before: [END eventhub_client_async_send] - :language: python - :dedent: 4 - :caption: Sends an event data and asynchronously waits - until acknowledgement is received or operation times out. - - """ - if self.error: - raise self.error - if not self.running: - raise ValueError("Unable to send until client has been started.") - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - event_data.message.on_send_complete = self._on_outcome - try: - await self._handler.send_message_async(event_data.message) - if self._outcome != constants.MessageSendResult.Ok: - raise Sender._error(self._outcome, self._condition) - except (errors.TokenExpired, errors.AuthenticationException): - log.info("AsyncSender disconnected due to token error. Attempting reconnect.") - await self.reconnect_async() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - await self.reconnect_async() - else: - log.info("AsyncSender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - await self.reconnect_async() - else: - log.info("AsyncSender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r). 
Shutting down.", e) - error = EventHubError("Send failed: {}".format(e)) - await self.close_async(exception=error) - raise error - else: - return self._outcome - - async def wait_async(self): - """ - Wait until all transferred events have been sent. - """ - if self.error: - raise self.error - if not self.running: - raise ValueError("Unable to send until client has been started.") - try: - await self._handler.wait_async() - except (errors.TokenExpired, errors.AuthenticationException): - log.info("AsyncSender disconnected due to token error. Attempting reconnect.") - await self.reconnect_async() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - await self.reconnect_async() - else: - log.info("AsyncSender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - await self.reconnect_async() - else: - log.info("AsyncSender detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close_async(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r).", e) - raise EventHubError("Send failed: {}".format(e)) From 384e802400ba9051a7b88802e3e8f2f021f8a737 Mon Sep 17 00:00:00 2001 From: Yijun Xie <48257664+YijunXieMS@users.noreply.github.com> Date: Tue, 21 May 2019 06:45:30 -0700 Subject: [PATCH 02/54] EventHubs track2 starter (#5330) * Move to under sdk * Remove policies * Remove debugging files * Rename Offset to EventPosition * make tests a namespace package * Revised test receive for new code * Revised test send for track two * Update async code from sync * Revise async receive and send live test for track2 * Use uamqp 1.2 * Resolve code review feedback * add queue_message to async sender * send_batch receives both list and iterator --- .../azure/eventhub/__init__.py | 22 +- .../azure/eventhub/aio/__init__.py | 9 + .../eventhub/aio/event_hubs_client_async.py | 238 ++++++++++ .../azure/eventhub/aio/receiver_async.py | 378 ++++++++++++++++ .../azure/eventhub/aio/sender_async.py | 405 +++++++++++++++++ .../azure-eventhubs/azure/eventhub/client.py | 416 ++---------------- .../azure/eventhub/client_abstract.py | 311 +++++++++++++ .../azure-eventhubs/azure/eventhub/common.py | 58 ++- .../azure/eventhub/configuration.py | 21 + .../azure/eventhub/receiver.py | 51 ++- .../azure-eventhubs/azure/eventhub/sender.py | 143 +++--- .../eventprocessorhost/eh_partition_pump.py | 7 +- .../eventprocessorhost/partition_manager.py | 5 +- sdk/eventhub/azure-eventhubs/conftest.py | 33 +- sdk/eventhub/azure-eventhubs/setup.py | 2 +- .../azure-eventhubs/tests/__init__.py | 0 .../tests/asynctests/test_receive_async.py | 236 ++++------ .../tests/asynctests/test_send_async.py | 139 ++---- .../azure-eventhubs/tests/test_receive.py | 182 +++----- .../azure-eventhubs/tests/test_send.py | 138 ++---- 20 files changed, 1863 insertions(+), 931 deletions(-) 
create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/__init__.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index 7067761d5ef6..e2bcc43ed877 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -5,15 +5,21 @@ __version__ = "1.3.1" -from azure.eventhub.common import EventData, EventHubError, Offset +from azure.eventhub.common import EventData, EventHubError, EventPosition from azure.eventhub.client import EventHubClient from azure.eventhub.sender import Sender from azure.eventhub.receiver import Receiver +from uamqp.constants import MessageSendResult +from uamqp.constants import TransportType + +__all__ = [ + "EventData", + "EventHubError", + "EventPosition", + "EventHubClient", + "Sender", + "Receiver", + "MessageSendResult", + "TransportType", +] -try: - from azure.eventhub.async_ops import ( - EventHubClientAsync, - AsyncSender, - AsyncReceiver) -except (ImportError, SyntaxError): - pass # Python 3 async features not supported diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py new file mode 100644 index 000000000000..020392000d1f --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py @@ -0,0 +1,9 @@ +from .event_hubs_client_async import EventHubClient +from .receiver_async import Receiver +from 
.sender_async import Sender + +__all__ = [ + "EventHubClient", + "Receiver", + "Sender" +] diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py new file mode 100644 index 000000000000..275f76f6ee62 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py @@ -0,0 +1,238 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import logging +import asyncio +import time +import datetime +import functools + +from uamqp import authentication, constants, types, errors +from uamqp import ( + Message, + AMQPClientAsync, +) + +from azure.eventhub.common import parse_sas_token +from azure.eventhub import ( + EventHubError) +from ..client_abstract import EventHubClientAbstract + +from .sender_async import Sender +from .receiver_async import Receiver + + +log = logging.getLogger(__name__) + + +class EventHubClient(EventHubClientAbstract): + """ + The EventHubClient class defines a high level interface for asynchronously + sending events to and receiving events from the Azure Event Hubs service. + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async] + :end-before: [END create_eventhub_client_async] + :language: python + :dedent: 4 + :caption: Create a new instance of the Event Hub client async. + + """ + + def _create_auth(self, username=None, password=None): + """ + Create an ~uamqp.authentication.cbs_auth_async.SASTokenAuthAsync instance to authenticate + the session. + + :param username: The name of the shared access policy. 
+ :type username: str + :param password: The shared access key. + :type password: str + """ + http_proxy = self.config.http_proxy + transport_type = self.config.transport_type + auth_timeout = self.config.auth_timeout + if self.aad_credential and self.sas_token: + raise ValueError("Can't have both sas_token and aad_credential") + + elif self.aad_credential: + get_jwt_token = functools.partial(self.aad_credential.get_token, ['https://eventhubs.azure.net//.default']) + # TODO: should use async aad_credential.get_token. Check with Charles for async identity api + return authentication.JWTTokenAsync(self.auth_uri, self.auth_uri, + get_jwt_token, http_proxy=http_proxy, + transport_type=transport_type) + elif self.sas_token: + token = self.sas_token() if callable(self.sas_token) else self.sas_token + try: + expiry = int(parse_sas_token(token)['se']) + except (KeyError, TypeError, IndexError): + raise ValueError("Supplied SAS token has no valid expiry value.") + return authentication.SASTokenAsync( + self.auth_uri, self.auth_uri, token, + expires_at=expiry, + timeout=auth_timeout, + http_proxy=http_proxy, + transport_type=transport_type) + + username = username or self._auth_config['username'] + password = password or self._auth_config['password'] + if "@sas.root" in username: + return authentication.SASLPlain( + self.address.hostname, username, password, http_proxy=http_proxy, transport_type=transport_type) + return authentication.SASTokenAsync.from_shared_access_key( + self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) + + async def get_eventhub_information(self): + """ + Get details on the specified EventHub async. 
+ + :rtype: dict + """ + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password":self._auth_config.get("iot_password")} + try: + mgmt_auth = self._create_auth(**alt_creds) + mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug) + await mgmt_client.open_async() + mgmt_msg = Message(application_properties={'name': self.eh_name}) + response = await mgmt_client.mgmt_request_async( + mgmt_msg, + constants.READ_OPERATION, + op_type=b'com.microsoft:eventhub', + status_code_field=b'status-code', + description_fields=b'status-description') + eh_info = response.get_data() + output = {} + if eh_info: + output['name'] = eh_info[b'name'].decode('utf-8') + output['type'] = eh_info[b'type'].decode('utf-8') + output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) + output['partition_count'] = eh_info[b'partition_count'] + output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] + return output + finally: + await mgmt_client.close_async() + + def create_receiver( + self, consumer_group, partition, offset=None, epoch=None, operation=None, + prefetch=None, keep_alive=None, auto_reconnect=None, loop=None): + """ + Add an async receiver to the client for a particular consumer group and partition. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. + :type partition: str + :param offset: The offset from which to start receiving. + :type offset: ~azure.eventhub.common.Offset + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str + :rtype: ~azure.eventhub.aio.receiver_async.ReceiverAsync + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_receiver] + :end-before: [END create_eventhub_client_async_receiver] + :language: python + :dedent: 4 + :caption: Add an async receiver to the client for a particular consumer group and partition. + + """ + keep_alive = self.config.keep_alive if keep_alive is None else keep_alive + auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect + prefetch = self.config.prefetch if prefetch is None else prefetch + + path = self.address.path + operation if operation else self.address.path + source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( + self.address.hostname, path, consumer_group, partition) + handler = Receiver( + self, source_url, offset=offset, epoch=epoch, prefetch=prefetch, keep_alive=keep_alive, + auto_reconnect=auto_reconnect, loop=loop) + return handler + + def create_epoch_receiver( + self, consumer_group, partition, epoch, prefetch=300, operation=None, loop=None): + """ + Add an async receiver to the client with an epoch value. Only a single epoch receiver + can connect to a partition at any given time - additional epoch receivers must have + a higher epoch value or they will be rejected. If a 2nd epoch receiver has + connected, the first will be closed. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. + :type partition: str + :param epoch: The epoch value for the receiver. + :type epoch: int + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str + :rtype: ~azure.eventhub.aio.receiver_async.ReceiverAsync + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_epoch_receiver] + :end-before: [END create_eventhub_client_async_epoch_receiver] + :language: python + :dedent: 4 + :caption: Add an async receiver to the client with an epoch value. + + """ + return self.create_receiver(consumer_group, partition, epoch=epoch, prefetch=prefetch, + operation=operation, loop=loop) + + def create_sender( + self, partition=None, operation=None, send_timeout=None, keep_alive=None, auto_reconnect=None, loop=None): + """ + Add an async sender to the client to send ~azure.eventhub.common.EventData object + to an EventHub. + + :param partition: Optionally specify a particular partition to send to. + If omitted, the events will be distributed to available partitions via + round-robin. + :type partition: str + :operation: An optional operation to be appended to the hostname in the target URL. + The value must start with `/` character. + :type operation: str + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: int + :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during + periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not + be pinged. + :type keep_alive: int + :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. + Default value is `True`. + :type auto_reconnect: bool + :rtype: ~azure.eventhub.aio.sender_async.SenderAsync + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_sender] + :end-before: [END create_eventhub_client_async_sender] + :language: python + :dedent: 4 + :caption: Add an async sender to the client to + send ~azure.eventhub.common.EventData object to an EventHub. + + """ + target = "amqps://{}{}".format(self.address.hostname, self.address.path) + if operation: + target = target + operation + send_timeout = self.config.send_timeout if send_timeout is None else send_timeout + keep_alive = self.config.keep_alive if keep_alive is None else keep_alive + auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect + + handler = Sender( + self, target, partition=partition, send_timeout=send_timeout, + keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop) + return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py new file mode 100644 index 000000000000..aafe4c8dcd20 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -0,0 +1,378 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import asyncio +import uuid +import logging + +from uamqp import errors, types +from uamqp import ReceiveClientAsync, Source + +from azure.eventhub import EventHubError, EventData +from azure.eventhub.common import _error_handler + +log = logging.getLogger(__name__) + + +class Receiver(object): + """ + Implements the async API of a Receiver. + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_receiver_instance] + :end-before: [END create_eventhub_client_async_receiver_instance] + :language: python + :dedent: 4 + :caption: Create a new instance of the Async Receiver. + + """ + timeout = 0 + _epoch = b'com.microsoft:epoch' + + def __init__( # pylint: disable=super-init-not-called + self, client, source, offset=None, prefetch=300, epoch=None, + keep_alive=None, auto_reconnect=False, loop=None): + """ + Instantiate an async receiver. + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub.aio.EventHubClientAsync + :param source: The source EventHub from which to receive events. + :type source: ~uamqp.address.Source + :param prefetch: The number of events to prefetch from the service + for processing. Default is 300. + :type prefetch: int + :param epoch: An optional epoch value. + :type epoch: int + :param loop: An event loop. + """ + self.loop = loop or asyncio.get_event_loop() + self.running = False + self.client = client + self.source = source + self.offset = offset + self.prefetch = prefetch + self.epoch = epoch + self.keep_alive = keep_alive + self.auto_reconnect = auto_reconnect + self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) + self.reconnect_backoff = 1 + self.redirected = None + self.error = None + self.properties = None + partition = self.source.split('/')[-1] + self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + if epoch: + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + self._handler = ReceiveClientAsync( + source, + auth=self.client.get_auth(), + debug=self.client.config.network_tracing, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + 
error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close(exc_val) + + def __aiter__(self): + self.messages_iter = self._handler.receive_messages_iter_async() + return self + + async def __anext__(self): + while True: + try: + message = await self.messages_iter.__anext__() + event_data = EventData(message=message) + self.offset = event_data.offset + return event_data + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Receiver disconnected due to token error. Attempting reconnect.") + await self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + await self.reconnect() + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + await self.reconnect() + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e)) + await self.close(exception=error) + raise error + + async def open(self): + """ + Open the Receiver using the supplied conneciton. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync + + Example: + .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_receiver_open] + :end-before: [END eventhub_client_async_receiver_open] + :language: python + :dedent: 4 + :caption: Open the Receiver using the supplied conneciton. + + """ + # pylint: disable=protected-access + self.running = True + if self.redirected: + self.source = self.redirected.address + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password":self.client._auth_config.get("iot_password")} + self._handler = ReceiveClientAsync( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + await self._handler.open_async() + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) + + async def _reconnect(self): # pylint: disable=too-many-statements + # pylint: disable=protected-access + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password":self.client._auth_config.get("iot_password")} + await self._handler.close_async() + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset.selector()) + self._handler = ReceiveClientAsync( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + try: + await self._handler.open_async() + while not await self._handler.client_ready_async(): + await 
asyncio.sleep(0.05) + return True + except errors.TokenExpired as shutdown: + log.info("AsyncReceiver disconnected due to token expiry. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + return False + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + return False + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("AsyncReceiver couldn't authenticate. Attempting reconnect.") + return False + log.info("AsyncReceiver connection error (%r). Shutting down.", shutdown) + error = EventHubError(str(shutdown)) + await self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Receiver reconnect failed: {}".format(e)) + await self.close(exception=error) + raise error + + async def reconnect(self): + """If the Receiver was disconnected from the service with + a retryable error - attempt to reconnect.""" + while not await self._reconnect_async(): + await asyncio.sleep(self.reconnect_backoff) + + async def close(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. 
+ + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_receiver_close] + :end-before: [END eventhub_client_async_receiver_close] + :language: python + :dedent: 4 + :caption: Close down the handler. + + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): + self.error = EventHubError(str(exception), exception) + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This receive handler is now closed.") + await self._handler.close_async() + + async def receive(self, max_batch_size=None, timeout=None): + """ + Receive events asynchronously from the EventHub. + + :param max_batch_size: Receive a batch of events. Batch size will + be up to the maximum specified, but will return as soon as service + returns no new events. If combined with a timeout and no events are + retrieve before the time, the result will be empty. If no batch + size is supplied, the prefetch size will be the maximum. + :type max_batch_size: int + :rtype: list[~azure.eventhub.common.EventData] + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_receive] + :end-before: [END eventhub_client_async_receive] + :language: python + :dedent: 4 + :caption: Sends an event data and asynchronously waits + until acknowledgement is received or operation times out. 
+ + """ + if self.error: + raise self.error + if not self.running: + await self.open() + data_batch = [] + try: + timeout_ms = 1000 * timeout if timeout else 0 + message_batch = await self._handler.receive_message_batch_async( + max_batch_size=max_batch_size, + timeout=timeout_ms) + for message in message_batch: + event_data = EventData(message=message) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch + except (errors.TokenExpired, errors.AuthenticationException): + log.info("AsyncReceiver disconnected due to token error. Attempting reconnect.") + await self.reconnect() + return data_batch + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + await self.reconnect() + return data_batch + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncReceiver detached. Attempting reconnect.") + await self.reconnect() + return data_batch + log.info("AsyncReceiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e)) + await self.close(exception=error) + raise error + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close(exc_val) + + def __aiter__(self): + self.messages_iter = self._handler.receive_messages_iter_async() + return self + + async def __anext__(self): + while True: + try: + message = await self.messages_iter.__anext__() + event_data = EventData(message=message) + self.offset = event_data.offset + return event_data + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Receiver disconnected due to token error. Attempting reconnect.") + await self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + await self.reconnect() + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + await self.reconnect() + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e)) + await self.close(exception=error) + raise error diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py new file mode 100644 index 000000000000..0ef46d519579 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py @@ -0,0 +1,405 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import uuid +import asyncio +import logging + +from uamqp import constants, errors +from uamqp import SendClientAsync + +from azure.eventhub import MessageSendResult +from azure.eventhub import EventHubError +from azure.eventhub.common import _error_handler, _BatchSendEventData + +log = logging.getLogger(__name__) + + +class Sender(object): + """ + Implements the async API of a Sender. + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START create_eventhub_client_async_sender_instance] + :end-before: [END create_eventhub_client_async_sender_instance] + :language: python + :dedent: 4 + :caption: Create a new instance of the Async Sender. + + """ + + def __init__( # pylint: disable=super-init-not-called + self, client, target, partition=None, send_timeout=60, + keep_alive=None, auto_reconnect=False, loop=None): + """ + Instantiate an EventHub event SenderAsync handler. + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub.aio.EventHubClientAsync + :param target: The URI of the EventHub to send to. + :type target: str + :param partition: The specific partition ID to send to. 
Default is `None`, in which case the service + will assign to all partitions using round-robin. + :type partition: str + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: int + :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during + periods of inactivity. The default value is `None`, i.e. no keep alive pings. + :type keep_alive: int + :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. + Default value is `True`. + :type auto_reconnect: bool + :param loop: An event loop. If not specified the default event loop will be used. + """ + self.loop = loop or asyncio.get_event_loop() + self.running = False + self.client = client + self.target = target + self.partition = partition + self.keep_alive = keep_alive + self.auto_reconnect = auto_reconnect + self.timeout = send_timeout + self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) + self.reconnect_backoff = 1 + self.name = "EHSender-{}".format(uuid.uuid4()) + self.redirected = None + self.error = None + if partition: + self.target += "/Partitions/" + partition + self.name += "-partition{}".format(partition) + self._handler = SendClientAsync( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + self._outcome = None + self._condition = None + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close(exc_val) + + async def open(self): + """ + Open the Sender using the supplied conneciton. 
+ If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_sender_open] + :end-before: [END eventhub_client_async_sender_open] + :language: python + :dedent: 4 + :caption: Open the Sender using the supplied conneciton. + + """ + self.running = True + if self.redirected: + self.target = self.redirected.address + self._handler = SendClientAsync( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + await self._handler.open_async() + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) + + async def _reconnect(self): + await self._handler.close_async() + unsent_events = self._handler.pending_messages + self._handler = SendClientAsync( + self.target, + auth=self.client.get_auth(), + debug=self.client.debug, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client.create_properties(), + loop=self.loop) + try: + await self._handler.open_async() + self._handler.queue_message(*unsent_events) + await self._handler.wait_async() + return True + except errors.TokenExpired as shutdown: + log.info("AsyncSender disconnected due to token expiry. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncSender detached. 
Attempting reconnect.") + return False + log.info("AsyncSender reconnect failed. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + return False + log.info("AsyncSender reconnect failed. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("AsyncSender couldn't authenticate. Attempting reconnect.") + return False + log.info("AsyncSender connection error (%r). Shutting down.", shutdown) + error = EventHubError(str(shutdown)) + await self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Sender reconnect failed: {}".format(e)) + await self.close(exception=error) + raise error + + async def reconnect(self): + """If the Receiver was disconnected from the service with + a retryable error - attempt to reconnect.""" + while not await self._reconnect(): + await asyncio.sleep(self.reconnect_backoff) + + async def close(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_sender_close] + :end-before: [END eventhub_client_async_sender_close] + :language: python + :dedent: 4 + :caption: Close down the handler. 
+ + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): + self.error = EventHubError(str(exception), exception) + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This send handler is now closed.") + await self._handler.close_async() + + async def _send_event_data(self, event_data): + if not self.running: + await self.open() + try: + self._handler.send_message(event_data.message) + if self._outcome != MessageSendResult.Ok: + raise Sender._error(self._outcome, self._condition) + except errors.MessageException as failed: + error = EventHubError(str(failed), failed) + await self.close(exception=error) + raise error + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Sender disconnected due to token error. Attempting reconnect.") + await self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + await self.reconnect() + else: + log.info("Sender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + await self.reconnect() + else: + log.info("Sender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e) + error = EventHubError("Send failed: {}".format(e)) + await self.close(exception=error) + raise error + else: + return self._outcome + + async def send(self, event_data): + """ + Sends an event data and asynchronously waits until + acknowledgement is received or operation times out. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to + send. + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_send] + :end-before: [END eventhub_client_async_send] + :language: python + :dedent: 4 + :caption: Sends an event data and asynchronously waits + until acknowledgement is received or operation times out. + + """ + if self.error: + raise self.error + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + event_data.message.on_send_complete = self._on_outcome + await self._send_event_data(event_data) + + async def send_batch(self, batch_event_data): + """ + Sends an event data and blocks until acknowledgement is + received or operation times out. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to + send. + :return: The outcome of the message send. + :rtype: ~uamqp.constants.MessageSendResult + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sync_send] + :end-before: [END eventhub_client_sync_send] + :language: python + :dedent: 4 + :caption: Sends an event data and blocks until acknowledgement is received or operation times out. 
+ + """ + if self.error: + raise self.error + + def verify_partition(event_datas): + ed_iter = iter(event_datas) + try: + ed = next(ed_iter) + partition_key = ed.partition_key + yield ed + except StopIteration: + raise ValueError("batch_event_data must not be empty") + for ed in ed_iter: + if ed.partition_key != partition_key: + raise ValueError("partition key of all EventData must be the same if being sent in a batch") + yield ed + + wrapper_event_data = _BatchSendEventData(verify_partition(batch_event_data)) + wrapper_event_data.message.on_send_complete = self._on_outcome + return await self._send_event_data(wrapper_event_data) + + def queue_message(self, event_data, callback=None): + """ + Transfers an event data and notifies the callback when the operation is done. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.common.EventData + :param callback: Callback to be run once the message has been send. + This must be a function that accepts two arguments. + :type callback: callable[~uamqp.constants.MessageSendResult, ~azure.eventhub.common.EventHubError] + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_transfer] + :end-before: [END eventhub_client_transfer] + :language: python + :dedent: 4 + :caption: Transfers an event data and notifies the callback when the operation is done. + + """ + if self.error: + raise self.error + if not self.running: + self.open() + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + if callback: + event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c)) + self._handler.queue_message(event_data.message) + + async def send_pending_messages(self): + """ + Wait until all transferred events have been sent. 
+ """ + if self.error: + raise self.error + if not self.running: + raise ValueError("Unable to send until client has been started.") + try: + await self._handler.wait_async() + except (errors.TokenExpired, errors.AuthenticationException): + log.info("AsyncSender disconnected due to token error. Attempting reconnect.") + await self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + await self.reconnect() + else: + log.info("AsyncSender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("AsyncSender detached. Attempting reconnect.") + await self.reconnect() + else: + log.info("AsyncSender detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r).", e) + raise EventHubError("Send failed: {}".format(e)) + + def _on_outcome(self, outcome, condition): + """ + Called when the outcome is received for a delivery. + + :param outcome: The outcome of the message delivery - success or failure. 
+ :type outcome: ~uamqp.constants.MessageSendResult + """ + self._outcome = outcome + self._condition = condition + + @staticmethod + def _error(outcome, condition): + return None if outcome == MessageSendResult.Ok else EventHubError(outcome, condition) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index a50babfca8c3..58cd975e4baf 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -25,67 +25,13 @@ from azure.eventhub.sender import Sender from azure.eventhub.receiver import Receiver from azure.eventhub.common import EventHubError, parse_sas_token +from .client_abstract import EventHubClientAbstract log = logging.getLogger(__name__) -def _parse_conn_str(conn_str): - endpoint = None - shared_access_key_name = None - shared_access_key = None - entity_path = None - for element in conn_str.split(';'): - key, _, value = element.partition('=') - if key.lower() == 'endpoint': - endpoint = value.rstrip('/') - elif key.lower() == 'hostname': - endpoint = value.rstrip('/') - elif key.lower() == 'sharedaccesskeyname': - shared_access_key_name = value - elif key.lower() == 'sharedaccesskey': - shared_access_key = value - elif key.lower() == 'entitypath': - entity_path = value - if not all([endpoint, shared_access_key_name, shared_access_key]): - raise ValueError("Invalid connection string") - return endpoint, shared_access_key_name, shared_access_key, entity_path - - -def _generate_sas_token(uri, policy, key, expiry=None): - """Create a shared access signiture token as a string literal. - :returns: SAS token as string literal. - :rtype: str - """ - from base64 import b64encode, b64decode - from hashlib import sha256 - from hmac import HMAC - if not expiry: - expiry = time.time() + 3600 # Default to 1 hour. 
- encoded_uri = quote_plus(uri) - ttl = int(expiry) - sign_key = '%s\n%d' % (encoded_uri, ttl) - signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) - result = { - 'sr': uri, - 'sig': signature, - 'se': str(ttl)} - if policy: - result['skn'] = policy - return 'SharedAccessSignature ' + urlencode(result) - - -def _build_uri(address, entity): - parsed = urlparse(address) - if parsed.path: - return address - if not entity: - raise ValueError("No EventHub specified") - address += "/" + str(entity) - return address - - -class EventHubClient(object): +class EventHubClient(EventHubClientAbstract): """ The EventHubClient class defines a high level interface for sending events to and receiving events from the Azure Event Hubs service. @@ -100,166 +46,6 @@ class EventHubClient(object): """ - def __init__( - self, address, username=None, password=None, debug=False, - http_proxy=None, auth_timeout=60, sas_token=None): - """ - Constructs a new EventHubClient with the given address URL. - - :param address: The full URI string of the Event Hub. This can optionally - include URL-encoded access name and key. - :type address: str - :param username: The name of the shared access policy. This must be supplied - if not encoded into the address. - :type username: str - :param password: The shared access key. This must be supplied if not encoded - into the address. - :type password: str - :param debug: Whether to output network trace logs to the logger. Default - is `False`. - :type debug: bool - :param http_proxy: HTTP proxy settings. This must be a dictionary with the following - keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). - Additionally the following keys may also be present: 'username', 'password'. - :type http_proxy: dict[str, Any] - :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. - The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. 
- :type auth_timeout: int - :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, - it will be used to retrieve subsequent tokens in the case of token expiry. The function should - take no arguments. - :type sas_token: str or callable - """ - self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] - self.sas_token = sas_token - self.address = urlparse(address) - self.eh_name = self.address.path.lstrip('/') - self.http_proxy = http_proxy - self.mgmt_target = "amqps://{}/{}".format(self.address.hostname, self.eh_name) - url_username = unquote_plus(self.address.username) if self.address.username else None - username = username or url_username - url_password = unquote_plus(self.address.password) if self.address.password else None - password = password or url_password - if (not username or not password) and not sas_token: - raise ValueError("Please supply either username and password, or a SAS token") - self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) - self._auth_config = {'username': username, 'password': password} - self.get_auth = functools.partial(self._create_auth) - self.debug = debug - self.auth_timeout = auth_timeout - - self.clients = [] - self.stopped = False - log.info("%r: Created the Event Hub client", self.container_id) - - @classmethod - def from_sas_token(cls, address, sas_token, eventhub=None, **kwargs): - """Create an EventHubClient from an existing auth token or token generator. - - :param address: The Event Hub address URL - :type address: str - :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, - it will be used to retrieve subsequent tokens in the case of token expiry. The function should - take no arguments. - :type sas_token: str or callable - :param eventhub: The name of the EventHub, if not already included in the address URL. - :type eventhub: str - :param debug: Whether to output network trace logs to the logger. 
Default - is `False`. - :type debug: bool - :param http_proxy: HTTP proxy settings. This must be a dictionary with the following - keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). - Additionally the following keys may also be present: 'username', 'password'. - :type http_proxy: dict[str, Any] - :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. - The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: int - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_sas_token] - :end-before: [END create_eventhub_client_sas_token] - :language: python - :dedent: 4 - :caption: Create an EventHubClient from an existing auth token or token generator. - - """ - address = _build_uri(address, eventhub) - return cls(address, sas_token=sas_token, **kwargs) - - @classmethod - def from_connection_string(cls, conn_str, eventhub=None, **kwargs): - """Create an EventHubClient from a connection string. - - :param conn_str: The connection string. - :type conn_str: str - :param eventhub: The name of the EventHub, if the EntityName is - not included in the connection string. - :type eventhub: str - :param debug: Whether to output network trace logs to the logger. Default - is `False`. - :type debug: bool - :param http_proxy: HTTP proxy settings. This must be a dictionary with the following - keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). - Additionally the following keys may also be present: 'username', 'password'. - :type http_proxy: dict[str, Any] - :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. - The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: int - - Example: - .. 
literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_connstr] - :end-before: [END create_eventhub_client_connstr] - :language: python - :dedent: 4 - :caption: Create an EventHubClient from a connection string. - - """ - address, policy, key, entity = _parse_conn_str(conn_str) - entity = eventhub or entity - address = _build_uri(address, entity) - return cls(address, username=policy, password=key, **kwargs) - - @classmethod - def from_iothub_connection_string(cls, conn_str, **kwargs): - """ - Create an EventHubClient from an IoTHub connection string. - - :param conn_str: The connection string. - :type conn_str: str - :param debug: Whether to output network trace logs to the logger. Default - is `False`. - :type debug: bool - :param http_proxy: HTTP proxy settings. This must be a dictionary with the following - keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). - Additionally the following keys may also be present: 'username', 'password'. - :type http_proxy: dict[str, Any] - :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. - The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: int - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_iot_connstr] - :end-before: [END create_eventhub_client_iot_connstr] - :language: python - :dedent: 4 - :caption: Create an EventHubClient from an IoTHub connection string. 
- - """ - address, policy, key, _ = _parse_conn_str(conn_str) - hub_name = address.split('.')[0] - username = "{}@sas.root.{}".format(policy, hub_name) - password = _generate_sas_token(address, policy, key) - client = cls("amqps://" + address, username=username, password=password, **kwargs) - client._auth_config = { # pylint: disable=protected-access - 'iot_username': policy, - 'iot_password': key, - 'username': username, - 'password': password} - return client - def _create_auth(self, username=None, password=None): """ Create an ~uamqp.authentication.SASTokenAuth instance to authenticate @@ -270,7 +56,18 @@ def _create_auth(self, username=None, password=None): :param password: The shared access key. :type password: str """ - if self.sas_token: + http_proxy = self.config.http_proxy + transport_type = self.config.transport_type + auth_timeout = self.config.auth_timeout + if self.aad_credential and self.sas_token: + raise ValueError("Can't have both sas_token and aad_credential") + + elif self.aad_credential: + get_jwt_token = functools.partial(self.aad_credential.get_token, ['https://eventhubs.azure.net//.default']) + return authentication.JWTTokenAuth(self.auth_uri, self.auth_uri, + get_jwt_token, http_proxy=http_proxy, + transport_type=transport_type) + elif self.sas_token: token = self.sas_token() if callable(self.sas_token) else self.sas_token try: expiry = int(parse_sas_token(token)['se']) @@ -279,122 +76,19 @@ def _create_auth(self, username=None, password=None): return authentication.SASTokenAuth( self.auth_uri, self.auth_uri, token, expires_at=expiry, - timeout=self.auth_timeout, - http_proxy=self.http_proxy) + timeout=auth_timeout, + http_proxy=http_proxy, + transport_type=transport_type) username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: return authentication.SASLPlain( - self.address.hostname, username, password, http_proxy=self.http_proxy) + self.address.hostname, 
username, password, http_proxy=http_proxy, transport_type=transport_type) return authentication.SASTokenAuth.from_shared_access_key( - self.auth_uri, username, password, timeout=self.auth_timeout, http_proxy=self.http_proxy) - - def create_properties(self): # pylint: disable=no-self-use - """ - Format the properties with which to instantiate the connection. - This acts like a user agent over HTTP. - - :rtype: dict - """ - properties = {} - properties["product"] = "eventhub.python" - properties["version"] = __version__ - properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3]) - properties["platform"] = sys.platform - return properties - - def _close_clients(self): - """ - Close all open Sender/Receiver clients. - """ - for client in self.clients: - client.close() + self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) - def _start_clients(self): - for client in self.clients: - try: - if not client.running: - client.open() - except Exception as exp: # pylint: disable=broad-except - client.close(exception=exp) - - def _process_redirect_uri(self, redirect): - redirect_uri = redirect.address.decode('utf-8') - auth_uri, _, _ = redirect_uri.partition("/ConsumerGroups") - self.address = urlparse(auth_uri) - self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) - self.eh_name = self.address.path.lstrip('/') - self.mgmt_target = redirect_uri - - def _handle_redirect(self, redirects): - if len(redirects) != len(self.clients): - raise EventHubError("Some clients are attempting to redirect the connection.") - if not all(r.hostname == redirects[0].hostname for r in redirects): - raise EventHubError("Multiple clients attempting to redirect to different hosts.") - self._process_redirect_uri(redirects[0]) - for client in self.clients: - client.open() - - def run(self): - """ - Run the EventHubClient in blocking mode. 
- Opens the connection and starts running all Sender/Receiver clients. - Returns a list of the start up results. For a succcesful client start the - result will be `None`, otherwise the exception raised. - If all clients failed to start, then run will fail, shut down the connection - and raise an exception. - If at least one client starts up successfully the run command will succeed. - - :rtype: list[~azure.eventhub.common.EventHubError] - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_run] - :end-before: [END eventhub_client_run] - :language: python - :dedent: 4 - :caption: Run the EventHubClient in blocking mode. - - """ - log.info("%r: Starting %r clients", self.container_id, len(self.clients)) - try: - self._start_clients() - redirects = [c.redirected for c in self.clients if c.redirected] - failed = [c.error for c in self.clients if c.error] - if failed and len(failed) == len(self.clients): - log.warning("%r: All clients failed to start.", self.container_id) - raise failed[0] - if failed: - log.warning("%r: %r clients failed to start.", self.container_id, len(failed)) - elif redirects: - self._handle_redirect(redirects) - except EventHubError: - self.stop() - raise - except Exception as e: - self.stop() - raise EventHubError(str(e)) - return failed - - def stop(self): - """ - Stop the EventHubClient and all its Sender/Receiver clients. - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_stop] - :end-before: [END eventhub_client_stop] - :language: python - :dedent: 4 - :caption: Stop the EventHubClient and all its Sender/Receiver clients. - - """ - log.info("%r: Stopping %r clients", self.container_id, len(self.clients)) - self.stopped = True - self._close_clients() - - def get_eventhub_info(self): + def get_eventhub_information(self): """ Get details on the specified EventHub. 
Keys in the details dictionary include: @@ -409,7 +103,7 @@ def get_eventhub_info(self): """ alt_creds = { "username": self._auth_config.get("iot_username"), - "password":self._auth_config.get("iot_password")} + "password": self._auth_config.get("iot_password")} try: mgmt_auth = self._create_auth(**alt_creds) mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug) @@ -433,9 +127,12 @@ def get_eventhub_info(self): finally: mgmt_client.close() - def add_receiver( - self, consumer_group, partition, offset=None, prefetch=300, - operation=None, keep_alive=30, auto_reconnect=True): + def create_receiver( + self, consumer_group, partition, offset=None, epoch=None, operation=None, + prefetch=None, + keep_alive=None, + auto_reconnect=None, + ): """ Add a receiver to the client for a particular consumer group and partition. @@ -461,56 +158,23 @@ def add_receiver( :caption: Add a receiver to the client for a particular consumer group and partition. """ + keep_alive = self.config.keep_alive if keep_alive is None else keep_alive + auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect + prefetch = self.config.prefetch if prefetch is None else prefetch + path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition) handler = Receiver( - self, source_url, offset=offset, prefetch=prefetch, - keep_alive=keep_alive, auto_reconnect=auto_reconnect) - self.clients.append(handler) + self, source_url, offset=offset, epoch=epoch, prefetch=prefetch, keep_alive=keep_alive, auto_reconnect=auto_reconnect) return handler - def add_epoch_receiver( + def create_epoch_receiver( self, consumer_group, partition, epoch, prefetch=300, - operation=None, keep_alive=30, auto_reconnect=True): - """ - Add a receiver to the client with an epoch value. 
Only a single epoch receiver - can connect to a partition at any given time - additional epoch receivers must have - a higher epoch value or they will be rejected. If a 2nd epoch receiver has - connected, the first will be closed. + operation=None): + return self.create_receiver(consumer_group, partition, epoch=epoch, prefetch=prefetch, operation=operation) - :param consumer_group: The name of the consumer group. - :type consumer_group: str - :param partition: The ID of the partition. - :type partition: str - :param epoch: The epoch value for the receiver. - :type epoch: int - :param prefetch: The message prefetch count of the receiver. Default is 300. - :type prefetch: int - :operation: An optional operation to be appended to the hostname in the source URL. - The value must start with `/` character. - :type operation: str - :rtype: ~azure.eventhub.receiver.Receiver - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_epoch_receiver] - :end-before: [END create_eventhub_client_epoch_receiver] - :language: python - :dedent: 4 - :caption: Add a receiver to the client with an epoch value. - - """ - path = self.address.path + operation if operation else self.address.path - source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, path, consumer_group, partition) - handler = Receiver( - self, source_url, prefetch=prefetch, epoch=epoch, - keep_alive=keep_alive, auto_reconnect=auto_reconnect) - self.clients.append(handler) - return handler - - def add_sender(self, partition=None, operation=None, send_timeout=60, keep_alive=30, auto_reconnect=True): + def create_sender(self, partition=None, operation=None, send_timeout=None, keep_alive=None, auto_reconnect=None): """ Add a sender to the client to send EventData object to an EventHub. 
@@ -544,8 +208,10 @@ def add_sender(self, partition=None, operation=None, send_timeout=60, keep_alive target = "amqps://{}{}".format(self.address.hostname, self.address.path) if operation: target = target + operation + send_timeout = self.config.send_timeout if send_timeout is None else send_timeout + keep_alive = self.config.keep_alive if keep_alive is None else keep_alive + auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect + handler = Sender( - self, target, partition=partition, send_timeout=send_timeout, - keep_alive=keep_alive, auto_reconnect=auto_reconnect) - self.clients.append(handler) + self, target, partition=partition, send_timeout=send_timeout, keep_alive=keep_alive, auto_reconnect=auto_reconnect) return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py new file mode 100644 index 000000000000..1435d15bd2be --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -0,0 +1,311 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +from __future__ import unicode_literals + +import logging +import datetime +import sys +import uuid +import time +import functools +from abc import abstractmethod +try: + from urlparse import urlparse + from urllib import unquote_plus, urlencode, quote_plus +except ImportError: + from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus + +import uamqp +from uamqp import Message +from uamqp import authentication +from uamqp import constants + +from azure.eventhub import __version__ +from azure.eventhub.sender import Sender +from azure.eventhub.receiver import Receiver +from azure.eventhub.common import EventHubError, parse_sas_token +from azure.eventhub.configuration import Configuration + +log = logging.getLogger(__name__) + + +def _parse_conn_str(conn_str): + endpoint = None + shared_access_key_name = None + shared_access_key = None + entity_path = None + for element in conn_str.split(';'): + key, _, value = element.partition('=') + if key.lower() == 'endpoint': + endpoint = value.rstrip('/') + elif key.lower() == 'hostname': + endpoint = value.rstrip('/') + elif key.lower() == 'sharedaccesskeyname': + shared_access_key_name = value + elif key.lower() == 'sharedaccesskey': + shared_access_key = value + elif key.lower() == 'entitypath': + entity_path = value + if not all([endpoint, shared_access_key_name, shared_access_key]): + raise ValueError("Invalid connection string") + return endpoint, shared_access_key_name, shared_access_key, entity_path + + +def _generate_sas_token(uri, policy, key, expiry=None): + """Create a shared access signiture token as a string literal. + :returns: SAS token as string literal. + :rtype: str + """ + from base64 import b64encode, b64decode + from hashlib import sha256 + from hmac import HMAC + if not expiry: + expiry = time.time() + 3600 # Default to 1 hour. 
+ encoded_uri = quote_plus(uri) + ttl = int(expiry) + sign_key = '%s\n%d' % (encoded_uri, ttl) + signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) + result = { + 'sr': uri, + 'sig': signature, + 'se': str(ttl)} + if policy: + result['skn'] = policy + return 'SharedAccessSignature ' + urlencode(result) + + +def _build_uri(address, entity): + parsed = urlparse(address) + if parsed.path: + return address + if not entity: + raise ValueError("No EventHub specified") + address += "/" + str(entity) + return address + + +class EventHubClientAbstract(object): + """ + The EventHubClient class defines a high level interface for sending + events to and receiving events from the Azure Event Hubs service. + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client] + :end-before: [END create_eventhub_client] + :language: python + :dedent: 4 + :caption: Create a new instance of the Event Hub client + + """ + + def __init__( + self, address, username=None, password=None, sas_token=None, aad_credential=None, **kwargs): + """ + Constructs a new EventHubClient with the given address URL. + + :param address: The full URI string of the Event Hub. This can optionally + include URL-encoded access name and key. + :type address: str + :param username: The name of the shared access policy. This must be supplied + if not encoded into the address. + :type username: str + :param password: The shared access key. This must be supplied if not encoded + into the address. + :type password: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. 
+ :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, + it will be used to retrieve subsequent tokens in the case of token expiry. The function should + take no arguments. + :type sas_token: str or callable + """ + self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] + self.sas_token = sas_token + self.address = urlparse(address) + self.aad_credential = aad_credential + self.eh_name = self.address.path.lstrip('/') + # self.http_proxy = kwargs.get("http_proxy") + self.keep_alive = kwargs.get("keep_alive", 30) + self.auto_reconnect = kwargs.get("auto_reconnect", True) + self.mgmt_target = "amqps://{}/{}".format(self.address.hostname, self.eh_name) + url_username = unquote_plus(self.address.username) if self.address.username else None + username = username or url_username + url_password = unquote_plus(self.address.password) if self.address.password else None + password = password or url_password + if (not username or not password) and not sas_token: + raise ValueError("Please supply either username and password, or a SAS token") + self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) + self._auth_config = {'username': username, 'password': password} + self.get_auth = functools.partial(self._create_auth) + # self.debug = kwargs.get("debug", False) # debug + #self.auth_timeout = auth_timeout + + self.stopped = False + self.config = Configuration(**kwargs) + self.debug = self.config.network_tracing + + log.info("%r: Created the Event Hub client", self.container_id) + + @classmethod + def from_sas_token(cls, address, sas_token, eventhub=None, **kwargs): + """Create an EventHubClient from an existing auth token or token generator. 
+ + :param address: The Event Hub address URL + :type address: str + :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, + it will be used to retrieve subsequent tokens in the case of token expiry. The function should + take no arguments. + :type sas_token: str or callable + :param eventhub: The name of the EventHub, if not already included in the address URL. + :type eventhub: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. + :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_sas_token] + :end-before: [END create_eventhub_client_sas_token] + :language: python + :dedent: 4 + :caption: Create an EventHubClient from an existing auth token or token generator. + + """ + address = _build_uri(address, eventhub) + return cls(address, sas_token=sas_token, **kwargs) + + @classmethod + def from_connection_string(cls, conn_str, eventhub=None, **kwargs): + """Create an EventHubClient from a connection string. + + :param conn_str: The connection string. + :type conn_str: str + :param eventhub: The name of the EventHub, if the EntityName is + not included in the connection string. + :type eventhub: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. 
This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. + :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_connstr] + :end-before: [END create_eventhub_client_connstr] + :language: python + :dedent: 4 + :caption: Create an EventHubClient from a connection string. + + """ + address, policy, key, entity = _parse_conn_str(conn_str) + entity = eventhub or entity + address = _build_uri(address, entity) + return cls(address, username=policy, password=key, **kwargs) + + @classmethod + def from_iothub_connection_string(cls, conn_str, **kwargs): + """ + Create an EventHubClient from an IoTHub connection string. + + :param conn_str: The connection string. + :type conn_str: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. + :type debug: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. + :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: int + + Example: + .. 
literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_iot_connstr] + :end-before: [END create_eventhub_client_iot_connstr] + :language: python + :dedent: 4 + :caption: Create an EventHubClient from an IoTHub connection string. + + """ + address, policy, key, _ = _parse_conn_str(conn_str) + hub_name = address.split('.')[0] + username = "{}@sas.root.{}".format(policy, hub_name) + password = _generate_sas_token(address, policy, key) + client = cls("amqps://" + address, username=username, password=password, **kwargs) + client._auth_config = { # pylint: disable=protected-access + 'iot_username': policy, + 'iot_password': key, + 'username': username, + 'password': password} + return client + + @classmethod + def from_aad_credential(cls, address, aad_credential, eventhub=None, **kwargs): + address = _build_uri(address, eventhub) + return cls(address, aad_credential=aad_credential, **kwargs) + + @abstractmethod + def _create_auth(self, username=None, password=None): + pass + + def create_properties(self): # pylint: disable=no-self-use + """ + Format the properties with which to instantiate the connection. + This acts like a user agent over HTTP. 
+ + :rtype: dict + """ + properties = {} + properties["product"] = "eventhub.python" + properties["version"] = __version__ + properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3]) + properties["platform"] = sys.platform + return properties + + def _process_redirect_uri(self, redirect): + redirect_uri = redirect.address.decode('utf-8') + auth_uri, _, _ = redirect_uri.partition("/ConsumerGroups") + self.address = urlparse(auth_uri) + self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) + self.eh_name = self.address.path.lstrip('/') + self.mgmt_target = redirect_uri + + @abstractmethod + def get_eventhub_information(self): + pass + + @abstractmethod + def create_receiver( + self, consumer_group, partition, epoch=None, offset=None, prefetch=300, + operation=None): + pass + + @abstractmethod + def create_sender(self, partition=None, operation=None, send_timeout=60): + pass diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 76e315d2a25e..03a602616c4d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -4,6 +4,7 @@ # -------------------------------------------------------------------------------------------- from __future__ import unicode_literals +from enum import Enum import datetime import calendar import json @@ -83,7 +84,7 @@ class EventData(object): PROP_TIMESTAMP = b"x-opt-enqueued-time" PROP_DEVICE_ID = b"iothub-connection-device-id" - def __init__(self, body=None, batch=None, to_device=None, message=None): + def __init__(self, body=None, to_device=None, message=None): """ Initialize EventData. 
@@ -102,9 +103,7 @@ def __init__(self, body=None, batch=None, to_device=None, message=None): self.msg_properties = MessageProperties() if to_device: self.msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) - if batch: - self.message = BatchMessage(data=batch, multi_messages=True, properties=self.msg_properties) - elif message: + if message: self.message = message self.msg_properties = message.properties self._annotations = message.annotations @@ -136,7 +135,7 @@ def offset(self): :rtype: ~azure.eventhub.common.Offset """ try: - return Offset(self._annotations[EventData.PROP_OFFSET].decode('UTF-8')) + return EventPosition(self._annotations[EventData.PROP_OFFSET].decode('UTF-8')) except (KeyError, AttributeError): return None @@ -208,7 +207,7 @@ def application_properties(self, value): :type value: dict """ self._app_properties = value - properties = dict(self._app_properties) + properties = None if value is None else dict(self._app_properties) self.message.application_properties = properties @property @@ -258,23 +257,32 @@ def body_as_json(self, encoding='UTF-8'): except Exception as e: raise TypeError("Event data is not compatible with JSON type: {}".format(e)) + def encode_message(self): + return self.message.encode_message() -class Offset(object): + +class _BatchSendEventData(EventData): + def __init__(self, batch_event_data): + # TODO: rethink if to_device should be included in + self.message = BatchMessage(data=batch_event_data, multi_messages=True, properties=None) + + +class EventPosition(object): """ - The offset (position or timestamp) where a receiver starts. Examples: + The position(offset, sequence or timestamp) where a receiver starts. 
Examples: Beginning of the event stream: - >>> offset = Offset("-1") + >>> event_pos = EventPosition("-1") End of the event stream: - >>> offset = Offset("@latest") + >>> event_pos = EventPosition("@latest") Events after the specified offset: - >>> offset = Offset("12345") + >>> event_pos = EventPosition("12345") Events from the specified offset: - >>> offset = Offset("12345", True) + >>> event_pos = EventPosition("12345", True) Events after a datetime: - >>> offset = Offset(datetime.datetime.utcnow()) + >>> event_pos = EventPosition(datetime.datetime.utcnow()) Events after a specific sequence number: - >>> offset = Offset(1506968696002) + >>> event_pos = EventPosition(1506968696002) """ def __init__(self, value, inclusive=False): @@ -299,10 +307,30 @@ def selector(self): if isinstance(self.value, datetime.datetime): timestamp = (calendar.timegm(self.value.utctimetuple()) * 1000) + (self.value.microsecond/1000) return ("amqp.annotation.x-opt-enqueued-time {} '{}'".format(operator, int(timestamp))).encode('utf-8') - if isinstance(self.value, six.integer_types): + elif isinstance(self.value, six.integer_types): return ("amqp.annotation.x-opt-sequence-number {} '{}'".format(operator, self.value)).encode('utf-8') return ("amqp.annotation.x-opt-offset {} '{}'".format(operator, self.value)).encode('utf-8') + @staticmethod + def from_start_of_stream(): + return EventPosition("-1") + + @staticmethod + def from_end_of_stream(): + return EventPosition("@latest") + + @staticmethod + def from_offset(offset, inclusive=False): + return EventPosition(offset, inclusive) + + @staticmethod + def from_sequence(sequence, inclusive=False): + return EventPosition(sequence, inclusive) + + @staticmethod + def from_enqueued_time(enqueued_time, inclusive=False): + return EventPosition(enqueued_time, inclusive) + class EventHubError(Exception): """ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py new file 
mode 100644 index 000000000000..2d7a7be57638 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -0,0 +1,21 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from uamqp.constants import TransportType + + +class Configuration(object): + def __init__(self, **kwargs): + self.user_agent = kwargs.get("user_agent") + self.max_retries = kwargs.get("max_retries", 3) + self.network_tracing = kwargs.get("debug", False) + self.http_proxy = kwargs.get("http_proxy") + self.auto_reconnect = kwargs.get("auto_reconnect", False) + self.keep_alive = kwargs.get("keep_alive", 0) + self.transport_type = TransportType.AmqpOverWebsocket if self.http_proxy \ + else kwargs.get("transport_type", TransportType.Amqp) + self.auth_timeout = kwargs.get("auth_timeout", 60) + self.prefetch = kwargs.get("prefetch") + self.send_timeout = kwargs.get("send_timeout", 60) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index 486c75b3c682..4577d0332af5 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -51,11 +51,12 @@ def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_a self.client = client self.source = source self.offset = offset + self.iter_started = False self.prefetch = prefetch self.epoch = epoch self.keep_alive = keep_alive self.auto_reconnect = auto_reconnect - self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) + self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) self.reconnect_backoff = 1 
self.properties = None self.redirected = None @@ -79,6 +80,52 @@ def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_a client_name=self.name, properties=self.client.create_properties()) + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close(exc_val) + + def __iter__(self): + if not self.running: + self.open() + if not self.iter_started: + self.iter_started = True + self.messages_iter = self._handler.receive_messages_iter() + return self + + def __next__(self): + while True: + try: + message = next(self.messages_iter) + event_data = EventData(message=message) + self.offset = event_data.offset + return event_data + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Receiver disconnected due to token error. Attempting reconnect.") + self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + self.reconnect() + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + self.reconnect() + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e)) + self.close(exception=error) + raise error + def open(self): """ Open the Receiver using the supplied conneciton. 
@@ -288,7 +335,7 @@ def receive(self, max_batch_size=None, timeout=None): if self.error: raise self.error if not self.running: - raise ValueError("Unable to receive until client has been started.") + self.open() data_batch = [] try: timeout_ms = 1000 * timeout if timeout else 0 diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index 0a7334050a5f..ab113eac1c28 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -10,8 +10,9 @@ from uamqp import constants, errors from uamqp import SendClient +from uamqp.constants import MessageSendResult -from azure.eventhub.common import EventHubError, _error_handler +from azure.eventhub.common import EventHubError, EventData, _BatchSendEventData, _error_handler log = logging.getLogger(__name__) @@ -60,7 +61,7 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N self.error = None self.keep_alive = keep_alive self.auto_reconnect = auto_reconnect - self.retry_policy = errors.ErrorPolicy(max_retries=3, on_error=_error_handler) + self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) self.reconnect_backoff = 1 self.name = "EHSender-{}".format(uuid.uuid4()) if partition: @@ -78,6 +79,12 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N self._outcome = None self._condition = None + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close(exc_val) + def open(self): """ Open the Sender using the supplied conneciton. 
@@ -181,28 +188,6 @@ def get_handler_state(self): # pylint: disable=protected-access return self._handler._message_sender.get_state() - def has_started(self): - """ - Whether the handler has completed all start up processes such as - establishing the connection, session, link and authentication, and - is not ready to process messages. - **This function is now deprecated and will be removed in v2.0+.** - - :rtype: bool - """ - # pylint: disable=protected-access - timeout = False - auth_in_progress = False - if self._handler._connection.cbs: - timeout, auth_in_progress = self._handler._auth.handle_token() - if timeout: - raise EventHubError("Authorization timeout.") - if auth_in_progress: - return False - if not self._handler._client_ready(): - return False - return True - def close(self, exception=None): """ Close down the handler. If the handler has already closed, @@ -235,37 +220,12 @@ def close(self, exception=None): self.error = EventHubError("This send handler is now closed.") self._handler.close() - def send(self, event_data): - """ - Sends an event data and blocks until acknowledgement is - received or operation times out. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData - :raises: ~azure.eventhub.common.EventHubError if the message fails to - send. - :return: The outcome of the message send. - :rtype: ~uamqp.constants.MessageSendResult - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_sync_send] - :end-before: [END eventhub_client_sync_send] - :language: python - :dedent: 4 - :caption: Sends an event data and blocks until acknowledgement is received or operation times out. 
- - """ - if self.error: - raise self.error + def _send_event_data(self, event_data): if not self.running: - raise ValueError("Unable to send until client has been started.") - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - event_data.message.on_send_complete = self._on_outcome + self.open() try: self._handler.send_message(event_data.message) - if self._outcome != constants.MessageSendResult.Ok: + if self._outcome != MessageSendResult.Ok: raise Sender._error(self._outcome, self._condition) except errors.MessageException as failed: error = EventHubError(str(failed), failed) @@ -300,7 +260,76 @@ def send(self, event_data): else: return self._outcome - def transfer(self, event_data, callback=None): + def send(self, event_data): + """ + Sends an event data and blocks until acknowledgement is + received or operation times out. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to + send. + :return: The outcome of the message send. + :rtype: ~uamqp.constants.MessageSendResult + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sync_send] + :end-before: [END eventhub_client_sync_send] + :language: python + :dedent: 4 + :caption: Sends an event data and blocks until acknowledgement is received or operation times out. + + """ + if self.error: + raise self.error + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + event_data.message.on_send_complete = self._on_outcome + return self._send_event_data(event_data) + + def send_batch(self, batch_event_data): + """ + Sends an event data and blocks until acknowledgement is + received or operation times out. + + :param event_data: The event to be sent. 
+ :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to + send. + :return: The outcome of the message send. + :rtype: ~uamqp.constants.MessageSendResult + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sync_send] + :end-before: [END eventhub_client_sync_send] + :language: python + :dedent: 4 + :caption: Sends an event data and blocks until acknowledgement is received or operation times out. + + """ + if self.error: + raise self.error + + def verify_partition(event_datas): + ed_iter = iter(event_datas) + try: + ed = next(ed_iter) + partition_key = ed.partition_key + yield ed + except StopIteration: + raise ValueError("batch_event_data must not be empty") + for ed in ed_iter: + if ed.partition_key != partition_key: + raise ValueError("partition key of all EventData must be the same if being sent in a batch") + yield ed + + wrapper_event_data = _BatchSendEventData(verify_partition(batch_event_data)) + wrapper_event_data.message.on_send_complete = self._on_outcome + return self._send_event_data(wrapper_event_data) + + def queue_message(self, event_data, callback=None): """ Transfers an event data and notifies the callback when the operation is done. @@ -322,14 +351,14 @@ def transfer(self, event_data, callback=None): if self.error: raise self.error if not self.running: - raise ValueError("Unable to send until client has been started.") + self.open() if event_data.partition_key and self.partition: raise ValueError("EventData partition key cannot be used with a partition sender.") if callback: event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c)) self._handler.queue_message(event_data.message) - def wait(self): + def send_pending_messages(self): """ Wait until all transferred events have been sent. 
@@ -345,7 +374,7 @@ def wait(self): if self.error: raise self.error if not self.running: - raise ValueError("Unable to send until client has been started.") + self.open() try: self._handler.wait() except (errors.TokenExpired, errors.AuthenticationException): @@ -385,4 +414,4 @@ def _on_outcome(self, outcome, condition): @staticmethod def _error(outcome, condition): - return None if outcome == constants.MessageSendResult.Ok else EventHubError(outcome, condition) + return None if outcome == MessageSendResult.Ok else EventHubError(outcome, condition) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py index e0aa25dc2e8d..d2c649f9a0a6 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py @@ -5,7 +5,8 @@ import logging import asyncio -from azure.eventhub import Offset, EventHubClientAsync +from azure.eventhub import EventPosition +from azure.eventhub.aio import EventHubClient from azure.eventprocessorhost.partition_pump import PartitionPump @@ -64,14 +65,14 @@ async def open_clients_async(self): """ await self.partition_context.get_initial_offset_async() # Create event hub client and receive handler and set options - self.eh_client = EventHubClientAsync( + self.eh_client = EventHubClient( self.host.eh_config.client_address, debug=self.host.eph_options.debug_trace, http_proxy=self.host.eph_options.http_proxy) self.partition_receive_handler = self.eh_client.add_async_receiver( self.partition_context.consumer_group_name, self.partition_context.partition_id, - Offset(self.partition_context.offset), + EventPosition(self.partition_context.offset), prefetch=self.host.eph_options.prefetch_count, keep_alive=self.host.eph_options.keep_alive_interval, auto_reconnect=self.host.eph_options.auto_reconnect_on_error, diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py index 41aaded73b56..d532846a5476 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py @@ -8,7 +8,8 @@ from queue import Queue from collections import Counter -from azure.eventhub import EventHubClientAsync +from azure.eventhub.aio import EventHubClient + from azure.eventprocessorhost.eh_partition_pump import EventHubPartitionPump from azure.eventprocessorhost.cancellation_token import CancellationToken @@ -36,7 +37,7 @@ async def get_partition_ids_async(self): """ if not self.partition_ids: try: - eh_client = EventHubClientAsync( + eh_client = EventHubClient( self.host.eh_config.client_address, debug=self.host.eph_options.debug_trace, http_proxy=self.host.eph_options.http_proxy) diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index 237a60918f17..ce6f83adc6af 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -19,7 +19,7 @@ collect_ignore.append("examples/async_examples") else: sys.path.append(os.path.join(os.path.dirname(__file__), "tests")) - from asynctests import MockEventProcessor + from tests.asynctests import MockEventProcessor from azure.eventprocessorhost import EventProcessorHost from azure.eventprocessorhost import EventHubPartitionPump from azure.eventprocessorhost import AzureStorageCheckpointLeaseManager @@ -29,8 +29,7 @@ from azure.eventprocessorhost.partition_pump import PartitionPump from azure.eventprocessorhost.partition_manager import PartitionManager -from azure import eventhub -from azure.eventhub import EventHubClient, Receiver, Offset +from azure.eventhub import EventHubClient, Receiver, EventPosition def get_logger(filename, level=logging.INFO): @@ -71,7 +70,7 @@ def 
create_eventhub(eventhub_config, client=None): raise ValueError("EventHub creation failed.") -def cleanup_eventhub(servicebus_config, hub_name, client=None): +def cleanup_eventhub(eventhub_config, hub_name, client=None): from azure.servicebus.control_client import ServiceBusService client = client or ServiceBusService( service_namespace=eventhub_config['namespace'], @@ -166,36 +165,34 @@ def device_id(): @pytest.fixture() def connstr_receivers(connection_str): client = EventHubClient.from_connection_string(connection_str, debug=False) - eh_hub_info = client.get_eventhub_info() + eh_hub_info = client.get_eventhub_information() partitions = eh_hub_info["partition_ids"] - recv_offset = Offset("@latest") + recv_offset = EventPosition("@latest") receivers = [] for p in partitions: - receivers.append(client.add_receiver("$default", p, prefetch=500, offset=Offset("@latest"))) - - client.run() - - for r in receivers: - r.receive(timeout=1) + receiver = client.create_receiver("$default", p, prefetch=500, offset=EventPosition("@latest")) + receivers.append(receiver) + receiver.receive(timeout=1) yield connection_str, receivers - client.stop() + for r in receivers: + r.close() @pytest.fixture() def connstr_senders(connection_str): client = EventHubClient.from_connection_string(connection_str, debug=True) - eh_hub_info = client.get_eventhub_info() + eh_hub_info = client.get_eventhub_information() partitions = eh_hub_info["partition_ids"] senders = [] for p in partitions: - senders.append(client.add_sender(partition=p)) - - client.run() + sender = client.create_sender(partition=p) + senders.append(sender) yield connection_str, senders - client.stop() + for s in senders: + s.close() @pytest.fixture() diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 034bdc14c115..1fdb12ec33d8 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -62,7 +62,7 @@ "tests", "tests.asynctests"]), 
install_requires=[ - 'uamqp~=1.1.0', + 'uamqp~=1.2.0', 'msrestazure>=0.4.32,<2.0.0', 'azure-common~=1.1', 'azure-storage-blob~=1.3' diff --git a/sdk/eventhub/azure-eventhubs/tests/__init__.py b/sdk/eventhub/azure-eventhubs/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 6b086ff8202c..1be11107dae0 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -9,18 +9,17 @@ import pytest import time -from azure import eventhub -from azure.eventhub import EventData, Offset, EventHubError, EventHubClientAsync +from azure.eventhub import EventData, EventPosition, EventHubError +from azure.eventhub.aio import EventHubClient @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_end_of_stream_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) - await client.run_async() - try: + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Receiving only a single event")) @@ -28,20 +27,15 @@ async def test_receive_end_of_stream_async(connstr_senders): assert len(received) == 1 assert list(received[-1].body)[0] == b"Receiving only a single event" - except: - raise - finally: - await client.stop_async() @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_offset_async(connstr_senders): connection_str, senders = connstr_senders - client = 
EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) - await client.run_async() - try: + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) @@ -50,27 +44,22 @@ async def test_receive_with_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.add_async_receiver("$default", "0", offset=offset) - await client.run_async() - received = await offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after offset")) - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - await client.stop_async() + offset_receiver = client.create_receiver("$default", "0", offset=offset) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after offset")) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_inclusive_offset_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) - await client.run_async() - try: + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) @@ -79,24 +68,19 @@ async def 
test_receive_with_inclusive_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset.value, inclusive=True)) - await client.run_async() - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - await client.stop_async() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset.value, inclusive=True)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_datetime_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) - await client.run_async() - try: + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) @@ -104,28 +88,23 @@ async def test_receive_with_datetime_async(connstr_senders): assert len(received) == 1 offset = received[0].enqueued_time - offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset)) - await client.run_async() - received = await offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after timestamp")) - time.sleep(1) - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - await client.stop_async() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 
0 + senders[0].send(EventData(b"Message after timestamp")) + time.sleep(1) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_sequence_no_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = client.add_async_receiver("$default", "0", offset=Offset('@latest')) - await client.run_async() - try: + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) @@ -133,28 +112,23 @@ async def test_receive_with_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset)) - await client.run_async() - received = await offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message next in sequence")) - time.sleep(1) - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - await client.stop_async() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message next in sequence")) + time.sleep(1) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_inclusive_sequence_no_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = 
client.add_async_receiver("$default", "0", offset=Offset('@latest')) - await client.run_async() - try: + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) @@ -162,34 +136,25 @@ async def test_receive_with_inclusive_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.add_async_receiver("$default", "0", offset=Offset(offset, inclusive=True)) - await client.run_async() - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - await client.stop_async() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset, inclusive=True)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_batch_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = client.add_async_receiver("$default", "0", prefetch=500, offset=Offset('@latest')) - await client.run_async() - try: + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 for i in range(10): senders[0].send(EventData(b"Data")) received = await receiver.receive(max_batch_size=5, timeout=5) assert len(received) == 5 - except: - raise - finally: - await client.stop_async() async def pump(receiver, sleep=None): @@ -213,22 +178,16 @@ async def test_epoch_receiver_async(connstr_senders): connection_str, 
senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, debug=False) receivers = [] for epoch in [10, 20]: - receivers.append(client.add_async_epoch_receiver("$default", "0", epoch, prefetch=5)) - try: - await client.run_async() - outputs = await asyncio.gather( - pump(receivers[0]), - pump(receivers[1]), - return_exceptions=True) - assert isinstance(outputs[0], EventHubError) - assert outputs[1] == 1 - except: - raise - finally: - await client.stop_async() + receivers.append(client.create_epoch_receiver("$default", "0", epoch, prefetch=5)) + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1]), + return_exceptions=True) + assert isinstance(outputs[0], EventHubError) + assert outputs[1] == 1 @pytest.mark.liveTest @@ -237,15 +196,14 @@ async def test_multiple_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClientAsync.from_connection_string(connection_str, debug=True) - partitions = await client.get_eventhub_info_async() + client = EventHubClient.from_connection_string(connection_str, debug=True) + partitions = await client.get_eventhub_information() assert partitions["partition_ids"] == ["0", "1"] receivers = [] for i in range(2): - receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) + receivers.append(client.create_receiver("$default", "0", prefetch=10)) try: - await client.run_async() - more_partitions = await client.get_eventhub_info_async() + more_partitions = await client.get_eventhub_information() assert more_partitions["partition_ids"] == ["0", "1"] outputs = await asyncio.gather( pump(receivers[0]), @@ -253,10 +211,9 @@ async def test_multiple_receiver_async(connstr_senders): return_exceptions=True) assert isinstance(outputs[0], int) 
and outputs[0] == 1 assert isinstance(outputs[1], int) and outputs[1] == 1 - except: - raise finally: - await client.stop_async() + for r in receivers: + await r.close() @pytest.mark.liveTest @@ -265,22 +222,20 @@ async def test_epoch_receiver_after_non_epoch_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, debug=False) receivers = [] - receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) - receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=10)) + receivers.append(client.create_receiver("$default", "0", prefetch=10)) + receivers.append(client.create_epoch_receiver("$default", "0", 15, prefetch=10)) try: - await client.run_async() outputs = await asyncio.gather( pump(receivers[0]), pump(receivers[1], sleep=5), return_exceptions=True) assert isinstance(outputs[0], EventHubError) assert isinstance(outputs[1], int) and outputs[1] == 1 - except: - raise finally: - await client.stop_async() + for r in receivers: + await r.close() @pytest.mark.liveTest @@ -289,51 +244,48 @@ async def test_non_epoch_receiver_after_epoch_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, debug=False) receivers = [] - receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=10)) - receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) + receivers.append(client.create_epoch_receiver("$default", "0", 15, prefetch=10)) + receivers.append(client.create_receiver("$default", "0", prefetch=10)) try: - await client.run_async() outputs = await 
asyncio.gather( pump(receivers[0]), pump(receivers[1]), return_exceptions=True) assert isinstance(outputs[1], EventHubError) assert isinstance(outputs[0], int) and outputs[0] == 1 - except: - raise finally: - await client.stop_async() + for r in receivers: + await r.close() @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_batch_with_app_prop_async(connstr_senders): - pytest.skip("Waiting on uAMQP release") + #pytest.skip("Waiting on uAMQP release") connection_str, senders = connstr_senders + app_prop_key = "raw_prop" + app_prop_value = "raw_value" + app_prop = {app_prop_key: app_prop_value} def batched(): for i in range(10): - yield "Event Data {}".format(i) + ed = EventData("Event Data {}".format(i)) + ed.application_properties = app_prop + yield ed for i in range(10, 20): - yield EventData("Event Data {}".format(i)) - - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - receiver = client.add_async_receiver("$default", "0", prefetch=500, offset=Offset('@latest')) - try: - await client.run_async() + ed = EventData("Event Data {}".format(i)) + ed.application_properties = app_prop + yield ed + client = EventHubClient.from_connection_string(connection_str, debug=False) + receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 - app_prop_key = "raw_prop" - app_prop_value = "raw_value" - batch_app_prop = {app_prop_key:app_prop_value} - batch_event = EventData(batch=batched()) - batch_event.application_properties = batch_app_prop - - senders[0].send(batch_event) + senders[0].send_batch(batched()) await asyncio.sleep(1) @@ -344,7 +296,3 @@ def batched(): assert list(message.body)[0] == "Event Data {}".format(index).encode('utf-8') assert (app_prop_key.encode('utf-8') in message.application_properties) \ and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == 
app_prop_value.encode('utf-8')) - except: - raise - finally: - await client.stop_async() diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py index 917d7cde3b63..b17dad9cae2c 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py @@ -11,16 +11,16 @@ import time import json -from azure.eventhub import EventData, EventHubClientAsync +from azure.eventhub import EventData +from azure.eventhub.aio import EventHubClient @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_with_partition_key_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender() data_val = 0 for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]: @@ -30,7 +30,6 @@ async def test_send_with_partition_key_async(connstr_receivers): data.partition_key = partition_key data_val += 1 await sender.send(data) - await client.stop_async() found_partition_keys = {} for index, partition in enumerate(receivers): @@ -47,15 +46,10 @@ async def test_send_with_partition_key_async(connstr_receivers): @pytest.mark.asyncio async def test_send_and_receive_zero_length_body_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() - try: - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender() + async with sender: await sender.send(EventData("")) - except: - raise - finally: - await client.stop_async() received = [] for r in receivers: @@ -69,15 +63,10 @@ 
async def test_send_and_receive_zero_length_body_async(connstr_receivers): @pytest.mark.asyncio async def test_send_single_event_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() - try: - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender() + async with sender: await sender.send(EventData(b"A single event")) - except: - raise - finally: - await client.stop_async() received = [] for r in receivers: @@ -93,17 +82,12 @@ async def test_send_batch_async(connstr_receivers): connection_str, receivers = connstr_receivers def batched(): for i in range(10): - yield "Event number {}".format(i) + yield EventData("Event number {}".format(i)) - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() - try: - await client.run_async() - await sender.send(EventData(batch=batched())) - except: - raise - finally: - await client.stop_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender() + async with sender: + await sender.send_batch(batched()) time.sleep(1) received = [] @@ -119,15 +103,10 @@ def batched(): @pytest.mark.asyncio async def test_send_partition_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender(partition="1") - try: - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender(partition="1") + async with sender: await sender.send(EventData(b"Data")) - except: - raise - finally: - await client.stop_async() partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 0 @@ -139,16 +118,11 @@ async def 
test_send_partition_async(connstr_receivers): @pytest.mark.asyncio async def test_send_non_ascii_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender(partition="0") - try: - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender(partition="0") + async with sender: await sender.send(EventData("é,è,à,ù,â,ê,î,ô,û")) await sender.send(EventData(json.dumps({"foo": "漢字"}))) - except: - raise - finally: - await client.stop_async() partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 2 @@ -160,19 +134,15 @@ async def test_send_non_ascii_async(connstr_receivers): @pytest.mark.asyncio async def test_send_partition_batch_async(connstr_receivers): connection_str, receivers = connstr_receivers + def batched(): for i in range(10): - yield "Event number {}".format(i) + yield EventData("Event number {}".format(i)) - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender(partition="1") - try: - await client.run_async() - await sender.send(EventData(batch=batched())) - except: - raise - finally: - await client.stop_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender(partition="1") + async with sender: + await sender.send_batch(batched()) partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 0 @@ -184,15 +154,10 @@ def batched(): @pytest.mark.asyncio async def test_send_array_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() - try: - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender() 
+ async with sender: await sender.send(EventData([b"A", b"B", b"C"])) - except: - raise - finally: - await client.stop_async() received = [] for r in receivers: @@ -206,17 +171,12 @@ async def test_send_array_async(connstr_receivers): @pytest.mark.asyncio async def test_send_multiple_clients_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender_0 = client.add_async_sender(partition="0") - sender_1 = client.add_async_sender(partition="1") - try: - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender_0 = client.create_sender(partition="0") + sender_1 = client.create_sender(partition="1") + async with sender_0 and sender_1: await sender_0.send(EventData(b"Message 0")) await sender_1.send(EventData(b"Message 1")) - except: - raise - finally: - await client.stop_async() partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 1 @@ -227,31 +187,26 @@ async def test_send_multiple_clients_async(connstr_receivers): @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_batch_with_app_prop_async(connstr_receivers): - pytest.skip("Waiting on uAMQP release") + # pytest.skip("Waiting on uAMQP release") connection_str, receivers = connstr_receivers + app_prop_key = "raw_prop" + app_prop_value = "raw_value" + app_prop = {app_prop_key: app_prop_value} def batched(): for i in range(10): + ed = EventData("Event number {}".format(i)) + ed.application_properties = app_prop yield "Event number {}".format(i) for i in range(10, 20): - yield EventData("Event number {}".format(i)) + ed = EventData("Event number {}".format(i)) + ed.application_properties = app_prop + yield "Event number {}".format(i) - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() - try: - await client.run_async() - - app_prop_key = "raw_prop" - app_prop_value = 
"raw_value" - batch_app_prop = {app_prop_key:app_prop_value} - batch_event = EventData(batch=batched()) - batch_event.application_properties = batch_app_prop - - await sender.send(batch_event) - except: - raise - finally: - await client.stop_async() + client = EventHubClient.from_connection_string(connection_str, debug=False) + sender = client.create_sender() + async with sender: + await sender.send_batch(batched()) time.sleep(1) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index 0b05bf78c842..51fbb3a6079a 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py @@ -9,14 +9,13 @@ import time import datetime -from azure import eventhub -from azure.eventhub import EventData, EventHubClient, Offset +from azure.eventhub import EventData, EventHubClient, EventPosition # def test_receive_without_events(connstr_senders): # connection_str, senders = connstr_senders # client = EventHubClient.from_connection_string(connection_str, debug=True) -# receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) +# receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) # finish = datetime.datetime.now() + datetime.timedelta(seconds=240) # count = 0 # try: @@ -38,10 +37,8 @@ def test_receive_end_of_stream(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) - try: - client.run() - + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Receiving only a single event")) @@ -50,22 +47,17 @@ def test_receive_end_of_stream(connstr_senders): assert received[0].body_as_str() == "Receiving only a single event" assert 
list(received[-1].body)[0] == b"Receiving only a single event" - except: - raise - finally: - client.stop() @pytest.mark.liveTest def test_receive_with_offset_sync(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, debug=False) - partitions = client.get_eventhub_info() + partitions = client.get_eventhub_information() assert partitions["partition_ids"] == ["0", "1"] - receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) - try: - client.run() - more_partitions = client.get_eventhub_info() + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + with receiver: + more_partitions = client.get_eventhub_information() assert more_partitions["partition_ids"] == ["0", "1"] received = receiver.receive(timeout=5) @@ -78,27 +70,22 @@ def test_receive_with_offset_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.add_receiver("$default", "0", offset=offset) - client.run() - received = offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after offset")) - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - client.stop() + offset_receiver = client.create_receiver("$default", "0", offset=offset) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after offset")) + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest def test_receive_with_inclusive_offset(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) - try: - client.run() + receiver = client.create_receiver("$default", "0", 
offset=EventPosition('@latest')) + with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) @@ -110,26 +97,21 @@ def test_receive_with_inclusive_offset(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset.value, inclusive=True)) - client.run() - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - client.stop() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset.value, inclusive=True)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest def test_receive_with_datetime_sync(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, debug=False) - partitions = client.get_eventhub_info() + partitions = client.get_eventhub_information() assert partitions["partition_ids"] == ["0", "1"] - receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) - try: - client.run() - more_partitions = client.get_eventhub_info() + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + with receiver: + more_partitions = client.get_eventhub_information() assert more_partitions["partition_ids"] == ["0", "1"] received = receiver.receive(timeout=5) assert len(received) == 0 @@ -141,17 +123,13 @@ def test_receive_with_datetime_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset)) - client.run() - received = offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after timestamp")) - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - 
raise - finally: - client.stop() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after timestamp")) + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -167,9 +145,8 @@ def test_receive_with_custom_datetime_sync(connstr_senders): for i in range(5): senders[0].send(EventData(b"Message after timestamp")) - receiver = client.add_receiver("$default", "0", offset=Offset(offset)) - try: - client.run() + receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + with receiver: all_received = [] received = receiver.receive(timeout=1) while received: @@ -180,20 +157,14 @@ def test_receive_with_custom_datetime_sync(connstr_senders): for received_event in all_received: assert received_event.body_as_str() == "Message after timestamp" assert received_event.enqueued_time > offset - except: - raise - finally: - client.stop() @pytest.mark.liveTest def test_receive_with_sequence_no(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) - try: - client.run() - + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) @@ -202,91 +173,73 @@ def test_receive_with_sequence_no(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset)) - client.run() - received = offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message next in sequence")) - time.sleep(1) - received = 
offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - client.stop() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message next in sequence")) + time.sleep(1) + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest def test_receive_with_inclusive_sequence_no(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) - try: - client.run() - + receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Data")) received = receiver.receive(timeout=5) assert len(received) == 1 offset = received[0].sequence_number - - offset_receiver = client.add_receiver("$default", "0", offset=Offset(offset, inclusive=True)) - client.run() - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 - except: - raise - finally: - client.stop() + offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset, inclusive=True)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest def test_receive_batch(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.add_receiver("$default", "0", prefetch=500, offset=Offset('@latest')) - try: - client.run() - + receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 for i in range(10): 
senders[0].send(EventData(b"Data")) received = receiver.receive(max_batch_size=5, timeout=5) assert len(received) == 5 - except: - raise - finally: - client.stop() @pytest.mark.liveTest def test_receive_batch_with_app_prop_sync(connstr_senders): - pytest.skip("Waiting on uAMQP release") + #pytest.skip("Waiting on uAMQP release") connection_str, senders = connstr_senders + app_prop_key = "raw_prop" + app_prop_value = "raw_value" + batch_app_prop = {app_prop_key: app_prop_value} def batched(): for i in range(10): - yield "Event Data {}".format(i) + ed = EventData("Event Data {}".format(i)) + ed.application_properties = batch_app_prop + yield ed for i in range(10, 20): - yield EventData("Event Data {}".format(i)) + ed = EventData("Event Data {}".format(i)) + ed.application_properties = batch_app_prop + yield ed client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.add_receiver("$default", "0", prefetch=500, offset=Offset('@latest')) - try: - client.run() - + receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 - app_prop_key = "raw_prop" - app_prop_value = "raw_value" - batch_app_prop = {app_prop_key:app_prop_value} - batch_event = EventData(batch=batched()) - batch_event.application_properties = batch_app_prop - - senders[0].send(batch_event) + senders[0].send_batch(batched()) time.sleep(1) @@ -297,7 +250,4 @@ def batched(): assert list(message.body)[0] == "Event Data {}".format(index).encode('utf-8') assert (app_prop_key.encode('utf-8') in message.application_properties) \ and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) - except: - raise - finally: - client.stop() + diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index f7a8ccc3b158..cdf1f0ebc6d0 100644 --- 
a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -5,13 +5,11 @@ # license information. #-------------------------------------------------------------------------- -import os import pytest import time import json import sys -from azure import eventhub from azure.eventhub import EventData, EventHubClient @@ -19,10 +17,8 @@ def test_send_with_partition_key(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender() - try: - client.run() - + sender = client.create_sender() + with sender: data_val = 0 for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]: partition_key = b"test_partition_" + partition @@ -31,10 +27,6 @@ def test_send_with_partition_key(connstr_receivers): data.partition_key = partition_key data_val += 1 sender.send(data) - except: - raise - finally: - client.stop() found_partition_keys = {} for index, partition in enumerate(receivers): @@ -53,15 +45,10 @@ def test_send_and_receive_large_body_size(connstr_receivers): pytest.skip("Skipping on OSX - open issue regarding message size") connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender() - try: - client.run() + sender = client.create_sender() + with sender: payload = 250 * 1024 sender.send(EventData("A" * payload)) - except: - raise - finally: - client.stop() received = [] for r in receivers: @@ -75,14 +62,9 @@ def test_send_and_receive_large_body_size(connstr_receivers): def test_send_and_receive_zero_length_body(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender() - try: - client.run() + sender = client.create_sender() + with sender: sender.send(EventData("")) - except: - raise - finally: - client.stop() received = [] for 
r in receivers: @@ -96,14 +78,9 @@ def test_send_and_receive_zero_length_body(connstr_receivers): def test_send_single_event(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender() - try: - client.run() + sender = client.create_sender() + with sender: sender.send(EventData(b"A single event")) - except: - raise - finally: - client.stop() received = [] for r in receivers: @@ -118,17 +95,12 @@ def test_send_batch_sync(connstr_receivers): connection_str, receivers = connstr_receivers def batched(): for i in range(10): - yield "Event number {}".format(i) + yield EventData("Event number {}".format(i)) client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender() - try: - client.run() - sender.send(EventData(batch=batched())) - except: - raise - finally: - client.stop() + sender = client.create_sender() + with sender: + sender.send_batch(batched()) time.sleep(1) received = [] @@ -144,14 +116,9 @@ def batched(): def test_send_partition(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender(partition="1") - try: - client.run() + sender = client.create_sender(partition="1") + with sender: sender.send(EventData(b"Data")) - except: - raise - finally: - client.stop() partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 0 @@ -163,15 +130,10 @@ def test_send_partition(connstr_receivers): def test_send_non_ascii(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender(partition="0") - try: - client.run() + sender = client.create_sender(partition="0") + with sender: sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û")) sender.send(EventData(json.dumps({"foo": u"漢字"}))) - except: - raise 
- finally: - client.stop() partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 2 @@ -184,18 +146,13 @@ def test_send_partition_batch(connstr_receivers): connection_str, receivers = connstr_receivers def batched(): for i in range(10): - yield "Event number {}".format(i) + yield EventData("Event number {}".format(i)) client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender(partition="1") - try: - client.run() - sender.send(EventData(batch=batched())) + sender = client.create_sender(partition="1") + with sender: + sender.send_batch(batched()) time.sleep(1) - except: - raise - finally: - client.stop() partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 0 @@ -207,14 +164,9 @@ def batched(): def test_send_array_sync(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=True) - sender = client.add_sender() - try: - client.run() + sender = client.create_sender() + with sender: sender.send(EventData([b"A", b"B", b"C"])) - except: - raise - finally: - client.stop() received = [] for r in receivers: @@ -228,16 +180,12 @@ def test_send_array_sync(connstr_receivers): def test_send_multiple_clients(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, debug=False) - sender_0 = client.add_sender(partition="0") - sender_1 = client.add_sender(partition="1") - try: - client.run() + sender_0 = client.create_sender(partition="0") + sender_1 = client.create_sender(partition="1") + with sender_0: sender_0.send(EventData(b"Message 0")) + with sender_1: sender_1.send(EventData(b"Message 1")) - except: - raise - finally: - client.stop() partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 1 @@ -247,33 +195,27 @@ def test_send_multiple_clients(connstr_receivers): @pytest.mark.liveTest def 
test_send_batch_with_app_prop_sync(connstr_receivers): - pytest.skip("Waiting on uAMQP release") + #pytest.skip("Waiting on uAMQP release") connection_str, receivers = connstr_receivers + app_prop_key = "raw_prop" + app_prop_value = "raw_value" + app_prop = {app_prop_key: app_prop_value} + def batched(): for i in range(10): - yield "Event number {}".format(i) + ed = EventData("Event number {}".format(i)) + ed.application_properties = app_prop + yield ed for i in range(10, 20): - yield EventData("Event number {}".format(i)) + ed = EventData("Event number {}".format(i)) + ed.application_properties = app_prop + yield ed client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender() - try: - client.run() - - app_prop_key = "raw_prop" - app_prop_value = "raw_value" - batch_app_prop = {app_prop_key:app_prop_value} - batch_event = EventData(batch=batched()) - batch_event.application_properties = batch_app_prop - - sender.send(batch_event) - except: - raise - finally: - client.stop() - + sender = client.create_sender() + with sender: + sender.send_batch(batched()) time.sleep(1) - received = [] for r in receivers: received.extend(r.receive(timeout=3)) From 24219aa3b34a60bf01dd7da0ca68d51366383f5d Mon Sep 17 00:00:00 2001 From: Yijun Xie <48257664+YijunXieMS@users.noreply.github.com> Date: Mon, 3 Jun 2019 09:25:09 -0700 Subject: [PATCH 03/54] Eventhubs track2 python main issues (#5575) * Move to under sdk * Remove policies * Remove debugging files * Rename Offset to EventPosition * make tests a namespace package * Revised test receive for new code * Revised test send for track two * Update async code from sync * Revise async receive and send live test for track2 * Use uamqp 1.2 * Resolve code review feedback * add queue_message to async sender * send_batch receives both list and iterator * Update after adp review * send accepts EventData, list, iteratable * Event Hub Track 2 (#5) * Initial commit * Initial commit * Initial commit * 
event hub client * Update README.md * Update README.md Fix typos * Memory leak * Support timestamp filter * Support timestamp filter * Update README.md * Add sender and refactor * Added abstract classes Todo - Migrate Base Class Wireframes - Migrate Azure Classes * First draft of class wires directly ported from .net (might be some minor gaps) * send example * Set allowed sasl mechs * Remove client.py * Receiver update * Add dummy send api * logging updates * Error handling, reconnect and logging * Add app properties to event data * unbind transport on connection close * timestamp filter on py2 * module version * Reconnect once when link/session/connection close * Add SessionPolicy * Add client info * Updates - Cleaned wireframes to be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * Updates - Cleaned wireframes to be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * run client in non-blocking mode * Added unit testing * Implemented the following functionality - Azure_storage_checkpoint_manager - AzureBlobLease isExpired Todo Implement partition manager Implement partition context Test full implementation * Implemented Processing of First Epoh Todo - Fix lease bug that is breaking subsequent epochs * Changes - Completed End to End EPH Flow - Removed storage dependancy on downloading full blob to check lease state Todo - Add thread and queue for checking lease state and other storage operations - Ensure eventhub client shuts down properly - Find way to update partition pumps without restarting them - Other optimizations * Move examples out * Changes - Added thread pool executor to 
enable conncurent execution of partitions - Removed partition pump dependency on max_batch Todo - Ensure eventhub client shuts down properly (This is causing errors) - Add thread pool for making checkpoint code conccurent - Add thread and queue for checking lease state and other storage operations to enable async - Find way to reassign active partition pumps without restarting them - Other optimizations * Add async receive * Changes - Added logs - Fixed error causing client to prematurely shutdown * Manual link flow control for async receive * Workaround for stuck async receiver * Local variable names * Changes - Optimized logging and comments Todo - Add concurecny mechanim for azure storage - Depricate partition pump event queue and update to latest version of the client * Create Dockerfile * Stuck async receiver * credit keeps increasing in async receiver * Changes - Added asnyc event hub client support - Optimized logging and comments Todo - Add concurecny mechanim for azure storage * Updated docker file as requested * Added EPH example * Fix hardcoded HTTP header * Made suggested changes * Bug fix - Fixed event loop bugs. In windows eventloop is thread dependent but in ubuntu the eventloop is threadsafe so you need to differentiate the thread specific eventloop from the host one. 
* Updated loop naming convention to be consistent * Added option to pass asyncio event_loop to eph * Updated docker file * Fixed critical bug with partition manager and aquirec mechanisiims Todo : Identitfy and fix remaining bug that is causing all pumps to shut down when a second host starts * Bug fixes - Fixed bug where closing a pump closed a host - Fixed bug where error partitioned were not removed - Fixed bug where leases were renewed at an incorrect interval * Updated file headers Removed author reference * - Fixed bug in eph example that caused host to terminate prematurely - Made the lease renewal and checkpoint creation "multithreaded" * Increase the size of the connection pool The default connection pool size was too small for scenarios where multiple partitions were handled by one EventProcessorHost. If the amount of partitions handled is large, we might end up doing very many connections at the same time due to the multi-threaded blob-handling. For this reason, you might hit the OS limits that restrict the number of open files per process that in MacOS is not very big. This can be worked around with something like: `ulimit -n 2560` * Decrease info logging verbosity * added ability to toggle pump shutdown when all messages on a pump are processed. * Install also eventhubsprocessor * Default to keeping the pumps It is more optimal to keep the pumps alive even if there are no messages so that it is faster to pickup when messages start to arrive. * Pipe and event injector for Windows * Event injector updates * EHClient refactoring. EHClient leaks. Sender part 1. * Send support * ren eventhubsprocessor eventprocessorhost * Changes - Added event hub config to simplify installation story * Changes - Added optional eventprocessor_params for passing context to the event processor - Made the storage manager mandatatory * Fix memory leaks * logging * Fix: 1. process crash due to race in client stop and connection remote close. 2. 
handle client close in async receiver. 3. fail pending sends when sender is closed. 4. some debug logging. * tests * test: recv from multiple partitions * test utility * logging update * Support callback based send for high throughput * Workaroud memory issue in proton.reactor.ApplicationEvent * renamed eventprocessor to eventprocessorhost for consistency * updated docker file * fixed typo in url * Added amqp port to address * Updated sample documentation since url is auto encoded by config * Updated docs * Implement timeout for send * Async sender and example * Close injector pipe * Use send timer to also check queued messages * Add partition pump loop to partition_context This gives the EventProcessor access to the partition_pump loop object. This way if One desires to run synchronous code inside process_events_async one can utilize the loop object to run the synchronous code using await context.pump_loop.run_in_executor(None, bla) * Include details in send error * Release deliveries when sender is closed * added validation to unquoted sas key * added support for custom eventhub client prefetch size * Update README.md * Update README.md * Added Docker instructions and fixed Dockerfile (#18) * Removed Dockerfile from the main folder and fixed Dockerfile example * Added build and run Dockerfile documentation * Update Readme * Removed rm qpid-proton folder * Removed /usr/share copy * Disallow a sender/receiver to be registered more than once * Make everything async in EPH I have removed all usage of threads thoroughout the code. Using threads to run pumps etc. Causes async code written into the event-processor to become caotic (you need to follow which loop is currently being used in the call to prevent loops not being found or using the wrong loop (There is the main loop and then loops that are created inside threads) Things become caotic when the event processor is being called by objects that run under different loops. 
So, no Threading except usage of asyncio run_in_executor. This is done mostly for azure blob api calls. Also changed the bla_async methods to not block. this way, when calling open_async for the the event-processor-host, the command will exit once the EPH is started. Due to the above, see the edited example/eph.py where I added a monitor that makes sure the EPH is still running (Could be replaced by loop.run_forever()) in the example file I have also incorporated a test class for gracefully killing the EPH after 30 seconds. this works, nevertheless takes a while to close as we are waiting for timeouts on the eventhubs connections. * Started removing proton code * Removed most of proton _impl * Removed more code * Working sender * Updates to sender * Added some tests/samples * Some progress on clients * Fixed samples * Added azure namespace * #25 Partition key cannot be set for events * Updated version * Updated README * Renamed package to eventhub * Started EPH modifications * Updated imports * Fixed target urls * Updated logging * Updated async message receive * updated test imports * Added mgmt call to get eh info * Updated samples * Updated receive test * Added send and receive test clients * Updated uamqp dependency * Merged updates from dev * Fixed typos * Updated EPH sample * Started docstrings * Converted tests to pytest * Updates to batch receive * Started adding docstrings * More docstrings * bumped version * Started porting test suite * More tests and improvements * Moved eph tests * Some sample cleanup * Some test updates * Some test restructure * Docstring cleanup * Fixed some merge artifacts * Fixed formatting error * Removed delivery count * Nested package directory * Support custom URL suffix * Support custom URL suffix * Support for EventData device ID * Reverted nested directory * Updated release notes * Workaround for partitionkey * Finished partition key workaround * beta2 fixes * pylint fixes * Trigger CI * Test fixes * Added package manifest * 
Added warning for Python 2.7 support Support for issues #36 and #38 * Started adding scenario tests * More test scenarios * Better docstring formatting * Started iothub support * Fixed long running test * Fixed typo and memory leak * Restructure * IoThub support * Updates for RC1 release * Fix long running test * Docstring and sample cleanups * Working on error retry * Improved error processing * Fixed partition manager * Progress on IotHub error * Some test updates * Updated uamqp dependency * Restructure for independent connections * Added HTTP proxy support Fix for issue #41 * Fixed some tests + samples * pylint fixes * bumped version * Added keepalive config and some eph fixes * Made reconnect configurable * Added more EPH options * Bumped version * Pylint fix * Pylint fix * Added send and auth timeouts * Changed log formatting. Retry on reconnect * Pylint fixes * Renamed internal async module * Updated send example to match recv Fix for issue #56 * Added build badge to readme * Fix for repeat startup * Added more storage connect options to EPH * Bumped version * Handler blocked until client started * Added event data methods * Fix pylint * Fix 3.7 CI * Fix 3.7 CI * Updated pylint version * Pylint fixes * Updated README * Fixed readme badge refresh * Fixed bug in Azure namespace package * Updated manifest * Parse enqueued time as UTC Fixes #72. * Updates for release 1.2.0 (#81) * Made setup 2.7 compatible * Separated async tests * Support 2.7 types * Bumped version * Added non-ascii tests * Fix CI * Fix Py27 pylint * Added iot sample * Updated sender/receiver client opening * bumped version * Updated tests * Fixed test name * Fixed test env settings * Skip eph test * Updates for v1.3.0 (#91) * Added support for storing the state of the Event Processor along the Checkpoint. Both Checkpoint and the EP state are stored as pickled objects. * Fixing pylint complaints. * Switched from pickle back to JSON for lease persistence. 
* Fixes bug when accessing leases that don't contain EP context. Also, minor renaming. * Better SAS token support * Fixed pylint * Improved auth error handling * Test stabilization * Improved stored EPH context * Updated EPH context storing * Skip test on OSX * Skip tests on OSX Fail due to large message body bug. * Some cleanup * Fixed error handling * Improved SAS token parsing * Fixed datetime offset (#99) * Fixed datetime offset * Updated pylint * Removed 3.4 pylint pass * Fixed bug in error handling (#100) * Migrate event hub sdk to central repo 1. add verifiable code snippets into docstring 2. update readme according to the template 3. add livetest mark and config 4. optimize code layout/structure * 1. document formatting 2. separate async/sync example tests * Fix build error: 1. uamqp dependency mismatch 2. rename test_examples in eventhub to avoid mismatch * This should fix build error * remove tests import and add sys path to solve build error * add live test for sending BatchEvent with application_properties, new live test passed with new uamqp wheel locally installed * Add get_partition_info in Event Hub * add get_partition_info * Add telemetry information to the connection properties * Disable smart split in batch message * change epoch to exclusive_receiver_priority * fix small problem * remove uamqp dependency * Eventhub track2 (#6) * Initial commit * Initial commit * Initial commit * event hub client * Update README.md * Update README.md Fix typos * Memory leak * Support timestamp filter * Support timestamp filter * Update README.md * Add sender and refactor * Added abstract classes Todo - Migrate Base Class Wireframes - Migrate Azure Classes * First draft of class wires directly ported from .net (might be some minor gaps) * send example * Set allowed sasl mechs * Remove client.py * Receiver update * Add dummy send api * logging updates * Error handling, reconnect and logging * Add app properties to event data * unbind transport on connection close * 
timestamp filter on py2 * module version * Reconnect once when link/session/connection close * Add SessionPolicy * Add client info * Updates - Cleaned wireframes to be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * Updates - Cleaned wireframes to be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * run client in non-blocking mode * Added unit testing * Implemented the following functionality - Azure_storage_checkpoint_manager - AzureBlobLease isExpired Todo Implement partition manager Implement partition context Test full implementation * Implemented Processing of First Epoh Todo - Fix lease bug that is breaking subsequent epochs * Changes - Completed End to End EPH Flow - Removed storage dependancy on downloading full blob to check lease state Todo - Add thread and queue for checking lease state and other storage operations - Ensure eventhub client shuts down properly - Find way to update partition pumps without restarting them - Other optimizations * Move examples out * Changes - Added thread pool executor to enable conncurent execution of partitions - Removed partition pump dependency on max_batch Todo - Ensure eventhub client shuts down properly (This is causing errors) - Add thread pool for making checkpoint code conccurent - Add thread and queue for checking lease state and other storage operations to enable async - Find way to reassign active partition pumps without restarting them - Other optimizations * Add async receive * Changes - Added logs - Fixed error causing client to prematurely shutdown * Manual link flow control for async receive * Workaround for stuck async 
receiver * Local variable names * Changes - Optimized logging and comments Todo - Add concurecny mechanim for azure storage - Depricate partition pump event queue and update to latest version of the client * Create Dockerfile * Stuck async receiver * credit keeps increasing in async receiver * Changes - Added asnyc event hub client support - Optimized logging and comments Todo - Add concurecny mechanim for azure storage * Updated docker file as requested * Added EPH example * Fix hardcoded HTTP header * Made suggested changes * Bug fix - Fixed event loop bugs. In windows eventloop is thread dependent but in ubuntu the eventloop is threadsafe so you need to differentiate the thread specific eventloop from the host one. * Updated loop naming convention to be consistent * Added option to pass asyncio event_loop to eph * Updated docker file * Fixed critical bug with partition manager and aquirec mechanisiims Todo : Identitfy and fix remaining bug that is causing all pumps to shut down when a second host starts * Bug fixes - Fixed bug where closing a pump closed a host - Fixed bug where error partitioned were not removed - Fixed bug where leases were renewed at an incorrect interval * Updated file headers Removed author reference * - Fixed bug in eph example that caused host to terminate prematurely - Made the lease renewal and checkpoint creation "multithreaded" * Increase the size of the connection pool The default connection pool size was too small for scenarios where multiple partitions were handled by one EventProcessorHost. If the amount of partitions handled is large, we might end up doing very many connections at the same time due to the multi-threaded blob-handling. For this reason, you might hit the OS limits that restrict the number of open files per process that in MacOS is not very big. 
This can be worked around with something like: `ulimit -n 2560` * Decrease info logging verbosity * added ability to toggle pump shutdown when all messages on a pump are processed. * Install also eventhubsprocessor * Default to keeping the pumps It is more optimal to keep the pumps alive even if there are no messages so that it is faster to pickup when messages start to arrive. * Pipe and event injector for Windows * Event injector updates * EHClient refactoring. EHClient leaks. Sender part 1. * Send support * ren eventhubsprocessor eventprocessorhost * Changes - Added event hub config to simplify installation story * Changes - Added optional eventprocessor_params for passing context to the event processor - Made the storage manager mandatatory * Fix memory leaks * logging * Fix: 1. process crash due to race in client stop and connection remote close. 2. handle client close in async receiver. 3. fail pending sends when sender is closed. 4. some debug logging. * tests * test: recv from multiple partitions * test utility * logging update * Support callback based send for high throughput * Workaroud memory issue in proton.reactor.ApplicationEvent * renamed eventprocessor to eventprocessorhost for consistency * updated docker file * fixed typo in url * Added amqp port to address * Updated sample documentation since url is auto encoded by config * Updated docs * Implement timeout for send * Async sender and example * Close injector pipe * Use send timer to also check queued messages * Add partition pump loop to partition_context This gives the EventProcessor access to the partition_pump loop object. 
This way if One desires to run synchronous code inside process_events_async one can utilize the loop object to run the synchronous code using await context.pump_loop.run_in_executor(None, bla) * Include details in send error * Release deliveries when sender is closed * added validation to unquoted sas key * added support for custom eventhub client prefetch size * Update README.md * Update README.md * Added Docker instructions and fixed Dockerfile (#18) * Removed Dockerfile from the main folder and fixed Dockerfile example * Added build and run Dockerfile documentation * Update Readme * Removed rm qpid-proton folder * Removed /usr/share copy * Disallow a sender/receiver to be registered more than once * Make everything async in EPH I have removed all usage of threads thoroughout the code. Using threads to run pumps etc. Causes async code written into the event-processor to become caotic (you need to follow which loop is currently being used in the call to prevent loops not being found or using the wrong loop (There is the main loop and then loops that are created inside threads) Things become caotic when the event processor is being called by objects that run under different loops. So, no Threading except usage of asyncio run_in_executor. This is done mostly for azure blob api calls. Also changed the bla_async methods to not block. this way, when calling open_async for the the event-processor-host, the command will exit once the EPH is started. Due to the above, see the edited example/eph.py where I added a monitor that makes sure the EPH is still running (Could be replaced by loop.run_forever()) in the example file I have also incorporated a test class for gracefully killing the EPH after 30 seconds. this works, nevertheless takes a while to close as we are waiting for timeouts on the eventhubs connections. 
* Started removing proton code * Removed most of proton _impl * Removed more code * Working sender * Updates to sender * Added some tests/samples * Some progress on clients * Fixed samples * Added azure namespace * #25 Partition key cannot be set for events * Updated version * Updated README * Renamed package to eventhub * Started EPH modifications * Updated imports * Fixed target urls * Updated logging * Updated async message receive * updated test imports * Added mgmt call to get eh info * Updated samples * Updated receive test * Added send and receive test clients * Updated uamqp dependency * Merged updates from dev * Fixed typos * Updated EPH sample * Started docstrings * Converted tests to pytest * Updates to batch receive * Started adding docstrings * More docstrings * bumped version * Started porting test suite * More tests and improvements * Moved eph tests * Some sample cleanup * Some test updates * Some test restructure * Docstring cleanup * Fixed some merge artifacts * Fixed formatting error * Removed delivery count * Nested package directory * Support custom URL suffix * Support custom URL suffix * Support for EventData device ID * Reverted nested directory * Updated release notes * Workaround for partitionkey * Finished partition key workaround * beta2 fixes * pylint fixes * Trigger CI * Test fixes * Added package manifest * Added warning for Python 2.7 support Support for issues #36 and #38 * Started adding scenario tests * More test scenarios * Better docstring formatting * Started iothub support * Fixed long running test * Fixed typo and memory leak * Restructure * IoThub support * Updates for RC1 release * Fix long running test * Docstring and sample cleanups * Working on error retry * Improved error processing * Fixed partition manager * Progress on IotHub error * Some test updates * Updated uamqp dependency * Restructure for independent connections * Added HTTP proxy support Fix for issue #41 * Fixed some tests + samples * pylint fixes * bumped 
version * Added keepalive config and some eph fixes * Made reconnect configurable * Added more EPH options * Bumped version * Pylint fix * Pylint fix * Added send and auth timeouts * Changed log formatting. Retry on reconnect * Pylint fixes * Renamed internal async module * Updated send example to match recv Fix for issue #56 * Added build badge to readme * Fix for repeat startup * Added more storage connect options to EPH * Bumped version * Handler blocked until client started * Added event data methods * Fix pylint * Fix 3.7 CI * Fix 3.7 CI * Updated pylint version * Pylint fixes * Updated README * Fixed readme badge refresh * Fixed bug in Azure namespace package * Updated manifest * Parse enqueued time as UTC Fixes #72. * Updates for release 1.2.0 (#81) * Made setup 2.7 compatible * Separated async tests * Support 2.7 types * Bumped version * Added non-ascii tests * Fix CI * Fix Py27 pylint * Added iot sample * Updated sender/receiver client opening * bumped version * Updated tests * Fixed test name * Fixed test env settings * Skip eph test * Updates for v1.3.0 (#91) * Added support for storing the state of the Event Processor along the Checkpoint. Both Checkpoint and the EP state are stored as pickled objects. * Fixing pylint complaints. * Switched from pickle back to JSON for lease persistence. * Fixes bug when accessing leases that don't contain EP context. Also, minor renaming. * Better SAS token support * Fixed pylint * Improved auth error handling * Test stabilization * Improved stored EPH context * Updated EPH context storing * Skip test on OSX * Skip tests on OSX Fail due to large message body bug. * Some cleanup * Fixed error handling * Improved SAS token parsing * Fixed datetime offset (#99) * Fixed datetime offset * Updated pylint * Removed 3.4 pylint pass * Fixed bug in error handling (#100) * Migrate event hub sdk to central repo 1. add verifiable code snippets into docstring 2. update readme according to the template 3. 
add livetest mark and config 4. optimize code layout/structure * 1. document formatting 2. separate async/sync example tests * Fix build error: 1. uamqp dependency mismatch 2. rename test_examples in eventhub to avoid mismatch * This should fix build error * remove tests import and add sys path to solve build error * add live test for sending BatchEvent with application_properties, new live test passed with new uamqp wheel locally installed * Add get_partition_info in Event Hub * add get_partition_info * Add telemetry information to the connection properties * Disable smart split in batch message * 1. Add amqp over websocket test 2. Add proxy sample 3. Update some comment and code * Changes from cross-lang * Change debug to network_tracing * Sync Client Constructor * auto_reconnect True and keep_alive None * consumer_group $default * hide open() * partition -> partition_id * credentials -> credential in init * set running=true after opened * Eventhub track2 - Update livetest (#7) * Update README.md * Update README.md Fix typos * Memory leak * Support timestamp filter * Support timestamp filter * Update README.md * Add sender and refactor * Added abstract classes Todo - Migrate Base Class Wireframes - Migrate Azure Classes * First draft of class wires directly ported from .net (might be some minor gaps) * send example * Set allowed sasl mechs * Remove client.py * Receiver update * Add dummy send api * logging updates * Error handling, reconnect and logging * Add app properties to event data * unbind transport on connection close * timestamp filter on py2 * module version * Reconnect once when link/session/connection close * Add SessionPolicy * Add client info * Updates - Cleaned wireframes to be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * Updates - Cleaned wireframes to 
be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * run client in non-blocking mode * Added unit testing * Implemented the following functionality - Azure_storage_checkpoint_manager - AzureBlobLease isExpired Todo Implement partition manager Implement partition context Test full implementation * Implemented Processing of First Epoh Todo - Fix lease bug that is breaking subsequent epochs * Changes - Completed End to End EPH Flow - Removed storage dependancy on downloading full blob to check lease state Todo - Add thread and queue for checking lease state and other storage operations - Ensure eventhub client shuts down properly - Find way to update partition pumps without restarting them - Other optimizations * Move examples out * Changes - Added thread pool executor to enable conncurent execution of partitions - Removed partition pump dependency on max_batch Todo - Ensure eventhub client shuts down properly (This is causing errors) - Add thread pool for making checkpoint code conccurent - Add thread and queue for checking lease state and other storage operations to enable async - Find way to reassign active partition pumps without restarting them - Other optimizations * Add async receive * Changes - Added logs - Fixed error causing client to prematurely shutdown * Manual link flow control for async receive * Workaround for stuck async receiver * Local variable names * Changes - Optimized logging and comments Todo - Add concurecny mechanim for azure storage - Depricate partition pump event queue and update to latest version of the client * Create Dockerfile * Stuck async receiver * credit keeps increasing in async receiver * Changes - Added asnyc event hub client support - Optimized logging and comments Todo - Add concurecny mechanim for azure storage * Updated docker file as 
requested * Added EPH example * Fix hardcoded HTTP header * Made suggested changes * Bug fix - Fixed event loop bugs. In windows eventloop is thread dependent but in ubuntu the eventloop is threadsafe so you need to differentiate the thread specific eventloop from the host one. * Updated loop naming convention to be consistent * Added option to pass asyncio event_loop to eph * Updated docker file * Fixed critical bug with partition manager and aquirec mechanisiims Todo : Identitfy and fix remaining bug that is causing all pumps to shut down when a second host starts * Bug fixes - Fixed bug where closing a pump closed a host - Fixed bug where error partitioned were not removed - Fixed bug where leases were renewed at an incorrect interval * Updated file headers Removed author reference * - Fixed bug in eph example that caused host to terminate prematurely - Made the lease renewal and checkpoint creation "multithreaded" * Increase the size of the connection pool The default connection pool size was too small for scenarios where multiple partitions were handled by one EventProcessorHost. If the amount of partitions handled is large, we might end up doing very many connections at the same time due to the multi-threaded blob-handling. For this reason, you might hit the OS limits that restrict the number of open files per process that in MacOS is not very big. This can be worked around with something like: `ulimit -n 2560` * Decrease info logging verbosity * added ability to toggle pump shutdown when all messages on a pump are processed. * Install also eventhubsprocessor * Default to keeping the pumps It is more optimal to keep the pumps alive even if there are no messages so that it is faster to pickup when messages start to arrive. * Pipe and event injector for Windows * Event injector updates * EHClient refactoring. EHClient leaks. Sender part 1. 
* Send support * ren eventhubsprocessor eventprocessorhost * Changes - Added event hub config to simplify installation story * Changes - Added optional eventprocessor_params for passing context to the event processor - Made the storage manager mandatatory * Fix memory leaks * logging * Fix: 1. process crash due to race in client stop and connection remote close. 2. handle client close in async receiver. 3. fail pending sends when sender is closed. 4. some debug logging. * tests * test: recv from multiple partitions * test utility * logging update * Support callback based send for high throughput * Workaroud memory issue in proton.reactor.ApplicationEvent * renamed eventprocessor to eventprocessorhost for consistency * updated docker file * fixed typo in url * Added amqp port to address * Updated sample documentation since url is auto encoded by config * Updated docs * Implement timeout for send * Async sender and example * Close injector pipe * Use send timer to also check queued messages * Add partition pump loop to partition_context This gives the EventProcessor access to the partition_pump loop object. This way if One desires to run synchronous code inside process_events_async one can utilize the loop object to run the synchronous code using await context.pump_loop.run_in_executor(None, bla) * Include details in send error * Release deliveries when sender is closed * added validation to unquoted sas key * added support for custom eventhub client prefetch size * Update README.md * Update README.md * Added Docker instructions and fixed Dockerfile (#18) * Removed Dockerfile from the main folder and fixed Dockerfile example * Added build and run Dockerfile documentation * Update Readme * Removed rm qpid-proton folder * Removed /usr/share copy * Disallow a sender/receiver to be registered more than once * Make everything async in EPH I have removed all usage of threads thoroughout the code. Using threads to run pumps etc. 
Causes async code written into the event-processor to become caotic (you need to follow which loop is currently being used in the call to prevent loops not being found or using the wrong loop (There is the main loop and then loops that are created inside threads) Things become caotic when the event processor is being called by objects that run under different loops. So, no Threading except usage of asyncio run_in_executor. This is done mostly for azure blob api calls. Also changed the bla_async methods to not block. this way, when calling open_async for the the event-processor-host, the command will exit once the EPH is started. Due to the above, see the edited example/eph.py where I added a monitor that makes sure the EPH is still running (Could be replaced by loop.run_forever()) in the example file I have also incorporated a test class for gracefully killing the EPH after 30 seconds. this works, nevertheless takes a while to close as we are waiting for timeouts on the eventhubs connections. 
* Started removing proton code * Removed most of proton _impl * Removed more code * Working sender * Updates to sender * Added some tests/samples * Some progress on clients * Fixed samples * Added azure namespace * #25 Partition key cannot be set for events * Updated version * Updated README * Renamed package to eventhub * Started EPH modifications * Updated imports * Fixed target urls * Updated logging * Updated async message receive * updated test imports * Added mgmt call to get eh info * Updated samples * Updated receive test * Added send and receive test clients * Updated uamqp dependency * Merged updates from dev * Fixed typos * Updated EPH sample * Started docstrings * Converted tests to pytest * Updates to batch receive * Started adding docstrings * More docstrings * bumped version * Started porting test suite * More tests and improvements * Moved eph tests * Some sample cleanup * Some test updates * Some test restructure * Docstring cleanup * Fixed some merge artifacts * Fixed formatting error * Removed delivery count * Nested package directory * Support custom URL suffix * Support custom URL suffix * Support for EventData device ID * Reverted nested directory * Updated release notes * Workaround for partitionkey * Finished partition key workaround * beta2 fixes * pylint fixes * Trigger CI * Test fixes * Added package manifest * Added warning for Python 2.7 support Support for issues #36 and #38 * Started adding scenario tests * More test scenarios * Better docstring formatting * Started iothub support * Fixed long running test * Fixed typo and memory leak * Restructure * IoThub support * Updates for RC1 release * Fix long running test * Docstring and sample cleanups * Working on error retry * Improved error processing * Fixed partition manager * Progress on IotHub error * Some test updates * Updated uamqp dependency * Restructure for independent connections * Added HTTP proxy support Fix for issue #41 * Fixed some tests + samples * pylint fixes * bumped 
version * Added keepalive config and some eph fixes * Made reconnect configurable * Added more EPH options * Bumped version * Pylint fix * Pylint fix * Added send and auth timeouts * Changed log formatting. Retry on reconnect * Pylint fixes * Renamed internal async module * Updated send example to match recv Fix for issue #56 * Added build badge to readme * Fix for repeat startup * Added more storage connect options to EPH * Bumped version * Handler blocked until client started * Added event data methods * Fix pylint * Fix 3.7 CI * Fix 3.7 CI * Updated pylint version * Pylint fixes * Updated README * Fixed readme badge refresh * Fixed bug in Azure namespace package * Updated manifest * Parse enqueued time as UTC Fixes #72. * Updates for release 1.2.0 (#81) * Made setup 2.7 compatible * Separated async tests * Support 2.7 types * Bumped version * Added non-ascii tests * Fix CI * Fix Py27 pylint * Added iot sample * Updated sender/receiver client opening * bumped version * Updated tests * Fixed test name * Fixed test env settings * Skip eph test * Updates for v1.3.0 (#91) * Added support for storing the state of the Event Processor along the Checkpoint. Both Checkpoint and the EP state are stored as pickled objects. * Fixing pylint complaints. * Switched from pickle back to JSON for lease persistence. * Fixes bug when accessing leases that don't contain EP context. Also, minor renaming. * Better SAS token support * Fixed pylint * Improved auth error handling * Test stabilization * Improved stored EPH context * Updated EPH context storing * Skip test on OSX * Skip tests on OSX Fail due to large message body bug. * Some cleanup * Fixed error handling * Improved SAS token parsing * Fixed datetime offset (#99) * Fixed datetime offset * Updated pylint * Removed 3.4 pylint pass * Fixed bug in error handling (#100) * Migrate event hub sdk to central repo 1. add verifiable code snippets into docstring 2. update readme according to the template 3. 
add livetest mark and config 4. optimize code layout/structure * 1. document formatting 2. separate async/sync example tests * Fix build error: 1. uamqp dependency mismatch 2. rename test_examples in eventhub to avoid mismatch * This should fix build error * remove tests import and add sys path to solve build error * add live test for sending BatchEvent with application_properties, new live test passed with new uamqp wheel locally installed * Add get_partition_info in Event Hub * add get_partition_info * Add telemetry information to the connection properties * Disable smart split in batch message * 1. Add amqp over websocket test 2. Add proxy sample 3. Update some comment and code * update some test code * Add __str__ to EventData * Update test code * Add eh error classes * EventHubError extends AzureError * Fix EventPosition default value issue * change $default to $Default * Handle TokenAuthError * wait for ready in _reconnect * fix get_partition_ids issue * Fix reconnect issue * small fix * fix async live test * Eventhub track2 Live test update (#8) * Set allowed sasl mechs * Remove client.py * Receiver update * Add dummy send api * logging updates * Error handling, reconnect and logging * Add app properties to event data * unbind transport on connection close * timestamp filter on py2 * module version * Reconnect once when link/session/connection close * Add SessionPolicy * Add client info * Updates - Cleaned wireframes to be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * Updates - Cleaned wireframes to be PEP compliant - Implemented single partition pump and single event_hub partition pump scenario Todo - Add Unit Tests for partition pump and event hub partition pump - Implement Partition Manager - Implement Checkpointing and Lease Managment * run client in 
non-blocking mode * Added unit testing * Implemented the following functionality - Azure_storage_checkpoint_manager - AzureBlobLease isExpired Todo Implement partition manager Implement partition context Test full implementation * Implemented Processing of First Epoh Todo - Fix lease bug that is breaking subsequent epochs * Changes - Completed End to End EPH Flow - Removed storage dependancy on downloading full blob to check lease state Todo - Add thread and queue for checking lease state and other storage operations - Ensure eventhub client shuts down properly - Find way to update partition pumps without restarting them - Other optimizations * Move examples out * Changes - Added thread pool executor to enable conncurent execution of partitions - Removed partition pump dependency on max_batch Todo - Ensure eventhub client shuts down properly (This is causing errors) - Add thread pool for making checkpoint code conccurent - Add thread and queue for checking lease state and other storage operations to enable async - Find way to reassign active partition pumps without restarting them - Other optimizations * Add async receive * Changes - Added logs - Fixed error causing client to prematurely shutdown * Manual link flow control for async receive * Workaround for stuck async receiver * Local variable names * Changes - Optimized logging and comments Todo - Add concurecny mechanim for azure storage - Depricate partition pump event queue and update to latest version of the client * Create Dockerfile * Stuck async receiver * credit keeps increasing in async receiver * Changes - Added asnyc event hub client support - Optimized logging and comments Todo - Add concurecny mechanim for azure storage * Updated docker file as requested * Added EPH example * Fix hardcoded HTTP header * Made suggested changes * Bug fix - Fixed event loop bugs. 
In windows eventloop is thread dependent but in ubuntu the eventloop is threadsafe so you need to differentiate the thread specific eventloop from the host one. * Updated loop naming convention to be consistent * Added option to pass asyncio event_loop to eph * Updated docker file * Fixed critical bug with partition manager and aquirec mechanisiims Todo : Identitfy and fix remaining bug that is causing all pumps to shut down when a second host starts * Bug fixes - Fixed bug where closing a pump closed a host - Fixed bug where error partitioned were not removed - Fixed bug where leases were renewed at an incorrect interval * Updated file headers Removed author reference * - Fixed bug in eph example that caused host to terminate prematurely - Made the lease renewal and checkpoint creation "multithreaded" * Increase the size of the connection pool The default connection pool size was too small for scenarios where multiple partitions were handled by one EventProcessorHost. If the amount of partitions handled is large, we might end up doing very many connections at the same time due to the multi-threaded blob-handling. For this reason, you might hit the OS limits that restrict the number of open files per process that in MacOS is not very big. This can be worked around with something like: `ulimit -n 2560` * Decrease info logging verbosity * added ability to toggle pump shutdown when all messages on a pump are processed. * Install also eventhubsprocessor * Default to keeping the pumps It is more optimal to keep the pumps alive even if there are no messages so that it is faster to pickup when messages start to arrive. * Pipe and event injector for Windows * Event injector updates * EHClient refactoring. EHClient leaks. Sender part 1. 
* Send support * ren eventhubsprocessor eventprocessorhost * Changes - Added event hub config to simplify installation story * Changes - Added optional eventprocessor_params for passing context to the event processor - Made the storage manager mandatatory * Fix memory leaks * logging * Fix: 1. process crash due to race in client stop and connection remote close. 2. handle client close in async receiver. 3. fail pending sends when sender is closed. 4. some debug logging. * tests * test: recv from multiple partitions * test utility * logging update * Support callback based send for high throughput * Workaroud memory issue in proton.reactor.ApplicationEvent * renamed eventprocessor to eventprocessorhost for consistency * updated docker file * fixed typo in url * Added amqp port to address * Updated sample documentation since url is auto encoded by config * Updated docs * Implement timeout for send * Async sender and example * Close injector pipe * Use send timer to also check queued messages * Add partition pump loop to partition_context This gives the EventProcessor access to the partition_pump loop object. This way if One desires to run synchronous code inside process_events_async one can utilize the loop object to run the synchronous code using await context.pump_loop.run_in_executor(None, bla) * Include details in send error * Release deliveries when sender is closed * added validation to unquoted sas key * added support for custom eventhub client prefetch size * Update README.md * Update README.md * Added Docker instructions and fixed Dockerfile (#18) * Removed Dockerfile from the main folder and fixed Dockerfile example * Added build and run Dockerfile documentation * Update Readme * Removed rm qpid-proton folder * Removed /usr/share copy * Disallow a sender/receiver to be registered more than once * Make everything async in EPH I have removed all usage of threads thoroughout the code. Using threads to run pumps etc. 
Causes async code written into the event-processor to become caotic (you need to follow which loop is currently being used in the call to prevent loops not being found or using the wrong loop (There is the main loop and then loops that are created inside threads) Things become caotic when the event processor is being called by objects that run under different loops. So, no Threading except usage of asyncio run_in_executor. This is done mostly for azure blob api calls. Also changed the bla_async methods to not block. this way, when calling open_async for the the event-processor-host, the command will exit once the EPH is started. Due to the above, see the edited example/eph.py where I added a monitor that makes sure the EPH is still running (Could be replaced by loop.run_forever()) in the example file I have also incorporated a test class for gracefully killing the EPH after 30 seconds. this works, nevertheless takes a while to close as we are waiting for timeouts on the eventhubs connections. 
* Started removing proton code * Removed most of proton _impl * Removed more code * Working sender * Updates to sender * Added some tests/samples * Some progress on clients * Fixed samples * Added azure namespace * #25 Partition key cannot be set for events * Updated version * Updated README * Renamed package to eventhub * Started EPH modifications * Updated imports * Fixed target urls * Updated logging * Updated async message receive * updated test imports * Added mgmt call to get eh info * Updated samples * Updated receive test * Added send and receive test clients * Updated uamqp dependency * Merged updates from dev * Fixed typos * Updated EPH sample * Started docstrings * Converted tests to pytest * Updates to batch receive * Started adding docstrings * More docstrings * bumped version * Started porting test suite * More tests and improvements * Moved eph tests * Some sample cleanup * Some test updates * Some test restructure * Docstring cleanup * Fixed some merge artifacts * Fixed formatting error * Removed delivery count * Nested package directory * Support custom URL suffix * Support custom URL suffix * Support for EventData device ID * Reverted nested directory * Updated release notes * Workaround for partitionkey * Finished partition key workaround * beta2 fixes * pylint fixes * Trigger CI * Test fixes * Added package manifest * Added warning for Python 2.7 support Support for issues #36 and #38 * Started adding scenario tests * More test scenarios * Better docstring formatting * Started iothub support * Fixed long running test * Fixed typo and memory leak * Restructure * IoThub support * Updates for RC1 release * Fix long running test * Docstring and sample cleanups * Working on error retry * Improved error processing * Fixed partition manager * Progress on IotHub error * Some test updates * Updated uamqp dependency * Restructure for independent connections * Added HTTP proxy support Fix for issue #41 * Fixed some tests + samples * pylint fixes * bumped 
version * Added keepalive config and some eph fixes * Made reconnect configurable * Added more EPH options * Bumped version * Pylint fix * Pylint fix * Added send and auth timeouts * Changed log formatting. Retry on reconnect * Pylint fixes * Renamed internal async module * Updated send example to match recv Fix for issue #56 * Added build badge to readme * Fix for repeat startup * Added more storage connect options to EPH * Bumped version * Handler blocked until client started * Added event data methods * Fix pylint * Fix 3.7 CI * Fix 3.7 CI * Updated pylint version * Pylint fixes * Updated README * Fixed readme badge refresh * Fixed bug in Azure namespace package * Updated manifest * Parse enqueued time as UTC Fixes #72. * Updates for release 1.2.0 (#81) * Made setup 2.7 compatible * Separated async tests * Support 2.7 types * Bumped version * Added non-ascii tests * Fix CI * Fix Py27 pylint * Added iot sample * Updated sender/receiver client opening * bumped version * Updated tests * Fixed test name * Fixed test env settings * Skip eph test * Updates for v1.3.0 (#91) * Added support for storing the state of the Event Processor along the Checkpoint. Both Checkpoint and the EP state are stored as pickled objects. * Fixing pylint complaints. * Switched from pickle back to JSON for lease persistence. * Fixes bug when accessing leases that don't contain EP context. Also, minor renaming. * Better SAS token support * Fixed pylint * Improved auth error handling * Test stabilization * Improved stored EPH context * Updated EPH context storing * Skip test on OSX * Skip tests on OSX Fail due to large message body bug. * Some cleanup * Fixed error handling * Improved SAS token parsing * Fixed datetime offset (#99) * Fixed datetime offset * Updated pylint * Removed 3.4 pylint pass * Fixed bug in error handling (#100) * Migrate event hub sdk to central repo 1. add verifiable code snippets into docstring 2. update readme according to the template 3. 
add livetest mark and config 4. optimize code layout/structure * 1. document formatting 2. separate async/sync example tests * Fix build error: 1. uamqp dependency mismatch 2. rename test_examples in eventhub to avoid mismatch * This should fix build error * remove tests import and add sys path to solve build error * add live test for sending BatchEvent with application_properties, new live test passed with new uamqp wheel locally installed * Add get_partition_info in Event Hub * add get_partition_info * Add telemetry information to the connection properties * Disable smart split in batch message * 1. Add amqp over websocket test 2. Add proxy sample 3. Update some comment and code * update some test code * Add __str__ to EventData * Update test code * Update event position * Update live test * Update reconnect live test * Update too large data size * debug->network_tracing * Negative test fix * Remove partition_key, send with batching_label * Fix review problems * Fix a log issue * fix get_partition_properties bug * add client properties live test * Revised setup.py for track 2 --- .../azure/eventhub/__init__.py | 16 +- .../eventhub/aio/event_hubs_client_async.py | 152 ++++++------ .../azure/eventhub/aio/receiver_async.py | 204 ++++++++-------- .../azure/eventhub/aio/sender_async.py | 200 ++++++---------- .../azure-eventhubs/azure/eventhub/client.py | 128 +++++++---- .../azure/eventhub/client_abstract.py | 118 ++++------ .../azure-eventhubs/azure/eventhub/common.py | 174 +++++++------- .../azure/eventhub/configuration.py | 10 +- .../azure/eventhub/constants.py | 11 + .../azure-eventhubs/azure/eventhub/error.py | 108 +++++++++ .../azure/eventhub/receiver.py | 209 +++++++++-------- .../azure-eventhubs/azure/eventhub/sender.py | 217 ++++++------------ sdk/eventhub/azure-eventhubs/conftest.py | 13 +- .../azure-eventhubs/examples/proxy.py | 57 +++++ sdk/eventhub/azure-eventhubs/setup.py | 30 ++- .../asynctests/test_iothub_receive_async.py | 40 ++-- 
.../tests/asynctests/test_longrunning_eph.py | 27 +-- .../test_longrunning_eph_with_context.py | 26 +-- .../test_longrunning_receive_async.py | 76 +++--- .../asynctests/test_longrunning_send_async.py | 48 ++-- .../tests/asynctests/test_negative_async.py | 169 +++++++------- .../tests/asynctests/test_properties_async.py | 45 ++++ .../tests/asynctests/test_receive_async.py | 106 +++++---- .../tests/asynctests/test_reconnect_async.py | 54 ++--- .../tests/asynctests/test_send_async.py | 87 ++++--- .../tests/test_iothub_receive.py | 14 +- .../azure-eventhubs/tests/test_iothub_send.py | 11 +- .../tests/test_longrunning_receive.py | 17 +- .../tests/test_longrunning_send.py | 13 +- .../azure-eventhubs/tests/test_negative.py | 125 +++++----- .../azure-eventhubs/tests/test_properties.py | 41 ++++ .../azure-eventhubs/tests/test_receive.py | 86 ++++--- .../azure-eventhubs/tests/test_reconnect.py | 47 +--- .../azure-eventhubs/tests/test_send.py | 74 ++++-- 34 files changed, 1473 insertions(+), 1280 deletions(-) create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/error.py create mode 100644 sdk/eventhub/azure-eventhubs/examples/proxy.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/test_properties.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index e2bcc43ed877..9766b6816ab8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -5,21 +5,29 @@ __version__ = "1.3.1" -from azure.eventhub.common import EventData, EventHubError, EventPosition +from azure.eventhub.common import EventData, EventPosition +from azure.eventhub.error import EventHubError, EventDataError, ConnectError, AuthenticationError from azure.eventhub.client import 
EventHubClient from azure.eventhub.sender import Sender from azure.eventhub.receiver import Receiver -from uamqp.constants import MessageSendResult -from uamqp.constants import TransportType +from .constants import MessageSendResult +from .constants import TransportType +from .common import FIRST_AVAILABLE, NEW_EVENTS_ONLY, SharedKeyCredentials, SASTokenCredentials __all__ = [ + "__version__", "EventData", "EventHubError", + "ConnectError", + "EventDataError", + "AuthenticationError", "EventPosition", "EventHubClient", "Sender", "Receiver", "MessageSendResult", "TransportType", + "FIRST_AVAILABLE", "NEW_EVENTS_ONLY", + "SharedKeyCredentials", + "SASTokenCredentials", ] - diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py index 275f76f6ee62..d88461c98d0c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py @@ -15,7 +15,7 @@ AMQPClientAsync, ) -from azure.eventhub.common import parse_sas_token +from azure.eventhub.common import parse_sas_token, SharedKeyCredentials, SASTokenCredentials from azure.eventhub import ( EventHubError) from ..client_abstract import EventHubClientAbstract @@ -55,17 +55,19 @@ def _create_auth(self, username=None, password=None): http_proxy = self.config.http_proxy transport_type = self.config.transport_type auth_timeout = self.config.auth_timeout - if self.aad_credential and self.sas_token: - raise ValueError("Can't have both sas_token and aad_credential") - elif self.aad_credential: - get_jwt_token = functools.partial(self.aad_credential.get_token, ['https://eventhubs.azure.net//.default']) - # TODO: should use async aad_credential.get_token. 
Check with Charles for async identity api - return authentication.JWTTokenAsync(self.auth_uri, self.auth_uri, - get_jwt_token, http_proxy=http_proxy, - transport_type=transport_type) - elif self.sas_token: - token = self.sas_token() if callable(self.sas_token) else self.sas_token + if isinstance(self.credential, SharedKeyCredentials): + username = username or self._auth_config['username'] + password = password or self._auth_config['password'] + if "@sas.root" in username: + return authentication.SASLPlain( + self.host, username, password, http_proxy=http_proxy, transport_type=transport_type) + return authentication.SASTokenAsync.from_shared_access_key( + self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, + transport_type=transport_type) + + elif isinstance(self.credential, SASTokenCredentials): + token = self.credential.get_sas_token() try: expiry = int(parse_sas_token(token)['se']) except (KeyError, TypeError, IndexError): @@ -77,15 +79,14 @@ def _create_auth(self, username=None, password=None): http_proxy=http_proxy, transport_type=transport_type) - username = username or self._auth_config['username'] - password = password or self._auth_config['password'] - if "@sas.root" in username: - return authentication.SASLPlain( - self.address.hostname, username, password, http_proxy=http_proxy, transport_type=transport_type) - return authentication.SASTokenAsync.from_shared_access_key( - self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) + else: + get_jwt_token = functools.partial(self.credential.get_token, ['https://eventhubs.azure.net//.default']) + return authentication.JWTTokenAsync(self.auth_uri, self.auth_uri, + get_jwt_token, http_proxy=http_proxy, + transport_type=transport_type) + - async def get_eventhub_information(self): + async def get_properties(self): """ Get details on the specified EventHub async. 
@@ -108,18 +109,67 @@ async def get_eventhub_information(self): eh_info = response.get_data() output = {} if eh_info: - output['name'] = eh_info[b'name'].decode('utf-8') - output['type'] = eh_info[b'type'].decode('utf-8') - output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) - output['partition_count'] = eh_info[b'partition_count'] + output['path'] = eh_info[b'name'].decode('utf-8') + output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at'])/1000) output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] return output finally: await mgmt_client.close_async() + + async def get_partition_ids(self): + return (await self.get_properties())['partition_ids'] + + async def get_partition_properties(self, partition): + """ + Get information on the specified partition async. + Keys in the details dictionary include: + + -'name' + -'type' + -'partition' + -'begin_sequence_number' + -'last_enqueued_sequence_number' + -'last_enqueued_offset' + -'last_enqueued_time_utc' + -'is_partition_empty' + + :param partition: The target partition id. 
+ :type partition: str + :rtype: dict + """ + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password": self._auth_config.get("iot_password")} + try: + mgmt_auth = self._create_auth(**alt_creds) + mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug) + await mgmt_client.open_async() + mgmt_msg = Message(application_properties={'name': self.eh_name, + 'partition': partition}) + response = await mgmt_client.mgmt_request_async( + mgmt_msg, + constants.READ_OPERATION, + op_type=b'com.microsoft:partition', + status_code_field=b'status-code', + description_fields=b'status-description') + partition_info = response.get_data() + output = {} + if partition_info: + output['event_hub_path'] = partition_info[b'name'].decode('utf-8') + output['id'] = partition_info[b'partition'].decode('utf-8') + output['beginning_sequence_number'] = partition_info[b'begin_sequence_number'] + output['last_enqueued_sequence_number'] = partition_info[b'last_enqueued_sequence_number'] + output['last_enqueued_offset'] = partition_info[b'last_enqueued_offset'].decode('utf-8') + output['last_enqueued_time_utc'] = datetime.datetime.utcfromtimestamp( + float(partition_info[b'last_enqueued_time_utc'] / 1000)) + output['is_empty'] = partition_info[b'is_partition_empty'] + return output + finally: + await mgmt_client.close_async() def create_receiver( - self, consumer_group, partition, offset=None, epoch=None, operation=None, - prefetch=None, keep_alive=None, auto_reconnect=None, loop=None): + self, partition_id, consumer_group="$Default", event_position=None, exclusive_receiver_priority=None, operation=None, + prefetch=None, loop=None): """ Add an async receiver to the client for a particular consumer group and partition. @@ -127,8 +177,8 @@ def create_receiver( :type consumer_group: str :param partition: The ID of the partition. :type partition: str - :param offset: The offset from which to start receiving. 
- :type offset: ~azure.eventhub.common.Offset + :param event_position: The position from which to start receiving. + :type event_position: ~azure.eventhub.common.EventPosition :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int :operation: An optional operation to be appended to the hostname in the source URL. @@ -145,53 +195,18 @@ def create_receiver( :caption: Add an async receiver to the client for a particular consumer group and partition. """ - keep_alive = self.config.keep_alive if keep_alive is None else keep_alive - auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect prefetch = self.config.prefetch if prefetch is None else prefetch path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, path, consumer_group, partition) + self.address.hostname, path, consumer_group, partition_id) handler = Receiver( - self, source_url, offset=offset, epoch=epoch, prefetch=prefetch, keep_alive=keep_alive, - auto_reconnect=auto_reconnect, loop=loop) + self, source_url, offset=event_position, exclusive_receiver_priority=exclusive_receiver_priority, + prefetch=prefetch, loop=loop) return handler - def create_epoch_receiver( - self, consumer_group, partition, epoch, prefetch=300, operation=None, loop=None): - """ - Add an async receiver to the client with an epoch value. Only a single epoch receiver - can connect to a partition at any given time - additional epoch receivers must have - a higher epoch value or they will be rejected. If a 2nd epoch receiver has - connected, the first will be closed. - - :param consumer_group: The name of the consumer group. - :type consumer_group: str - :param partition: The ID of the partition. - :type partition: str - :param epoch: The epoch value for the receiver. - :type epoch: int - :param prefetch: The message prefetch count of the receiver. 
Default is 300. - :type prefetch: int - :operation: An optional operation to be appended to the hostname in the source URL. - The value must start with `/` character. - :type operation: str - :rtype: ~azure.eventhub.aio.receiver_async.ReceiverAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_epoch_receiver] - :end-before: [END create_eventhub_client_async_epoch_receiver] - :language: python - :dedent: 4 - :caption: Add an async receiver to the client with an epoch value. - - """ - return self.create_receiver(consumer_group, partition, epoch=epoch, prefetch=prefetch, - operation=operation, loop=loop) - def create_sender( - self, partition=None, operation=None, send_timeout=None, keep_alive=None, auto_reconnect=None, loop=None): + self, partition_id=None, operation=None, send_timeout=None, loop=None): """ Add an async sender to the client to send ~azure.eventhub.common.EventData object to an EventHub. 
@@ -229,10 +244,7 @@ def create_sender( if operation: target = target + operation send_timeout = self.config.send_timeout if send_timeout is None else send_timeout - keep_alive = self.config.keep_alive if keep_alive is None else keep_alive - auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect handler = Sender( - self, target, partition=partition, send_timeout=send_timeout, - keep_alive=keep_alive, auto_reconnect=auto_reconnect, loop=loop) + self, target, partition=partition_id, send_timeout=send_timeout, loop=loop) return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py index aafe4c8dcd20..6614001dc93d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -11,7 +11,7 @@ from uamqp import ReceiveClientAsync, Source from azure.eventhub import EventHubError, EventData -from azure.eventhub.common import _error_handler +from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, _error_handler log = logging.getLogger(__name__) @@ -33,8 +33,8 @@ class Receiver(object): _epoch = b'com.microsoft:epoch' def __init__( # pylint: disable=super-init-not-called - self, client, source, offset=None, prefetch=300, epoch=None, - keep_alive=None, auto_reconnect=False, loop=None): + self, client, source, offset=None, prefetch=300, exclusive_receiver_priority=None, + keep_alive=None, auto_reconnect=True, loop=None): """ Instantiate an async receiver. 
@@ -54,8 +54,9 @@ def __init__( # pylint: disable=super-init-not-called self.client = client self.source = source self.offset = offset + self.messages_iter = None self.prefetch = prefetch - self.epoch = epoch + self.exclusive_receiver_priority = exclusive_receiver_priority self.keep_alive = keep_alive self.auto_reconnect = auto_reconnect self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) @@ -68,8 +69,8 @@ def __init__( # pylint: disable=super-init-not-called source = Source(self.source) if self.offset is not None: source.set_filter(self.offset.selector()) - if epoch: - self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + if exclusive_receiver_priority: + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(exclusive_receiver_priority))} self._handler = ReceiveClientAsync( source, auth=self.client.get_auth(), @@ -80,7 +81,7 @@ def __init__( # pylint: disable=super-init-not-called error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(), + properties=self.client.create_properties(self.client.config.user_agent), loop=self.loop) async def __aenter__(self): @@ -90,12 +91,14 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): await self.close(exc_val) def __aiter__(self): - self.messages_iter = self._handler.receive_messages_iter_async() return self async def __anext__(self): + await self._open() while True: try: + if not self.messages_iter: + self.messages_iter = self._handler.receive_messages_iter_async() message = await self.messages_iter.__anext__() event_data = EventData(message=message) self.offset = event_data.offset @@ -107,25 +110,32 @@ async def __anext__(self): if shutdown.action.retry and self.auto_reconnect: log.info("Receiver detached. Attempting reconnect.") await self.reconnect() - log.info("Receiver detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + else: + log.info("Receiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error except errors.MessageHandlerError as shutdown: if self.auto_reconnect: log.info("Receiver detached. Attempting reconnect.") await self.reconnect() - log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + else: + log.info("Receiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except StopAsyncIteration: + raise + except asyncio.CancelledError: + # TODO: stop self.message_iter + raise except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) await self.close(exception=error) raise error - async def open(self): + async def _open(self): """ Open the Receiver using the supplied conneciton. 
If the handler has previously been redirected, the redirect @@ -144,7 +154,6 @@ async def open(self): """ # pylint: disable=protected-access - self.running = True if self.redirected: self.source = self.redirected.address source = Source(self.source) @@ -156,18 +165,46 @@ async def open(self): self._handler = ReceiveClientAsync( source, auth=self.client.get_auth(**alt_creds), - debug=self.client.debug, + debug=self.client.config.network_tracing, prefetch=self.prefetch, link_properties=self.properties, timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(), + properties=self.client.create_properties(self.client.config.user_agent), loop=self.loop) - await self._handler.open_async() - while not await self._handler.client_ready_async(): - await asyncio.sleep(0.05) + + if not self.running: + try: + await self._handler.open_async() + self.running = True + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) + except errors.AuthenticationException: + log.info("Receiver failed authentication. Retrying...") + await self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + await self.reconnect() + else: + log.info("Receiver detached. 
Failed to connect") + error = ConnectError(str(shutdown), shutdown) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("Receiver couldn't authenticate (%r).", shutdown) + error = AuthenticationError(str(shutdown)) + raise error + else: + log.info("Receiver connection error (%r).", shutdown) + error = ConnectError(str(shutdown)) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r)", e) + error = EventHubError("Receiver connect failed: {}".format(e)) + raise error async def _reconnect(self): # pylint: disable=too-many-statements # pylint: disable=protected-access @@ -181,23 +218,24 @@ async def _reconnect(self): # pylint: disable=too-many-statements self._handler = ReceiveClientAsync( source, auth=self.client.get_auth(**alt_creds), - debug=self.client.debug, + debug=self.client.config.network_tracing, prefetch=self.prefetch, link_properties=self.properties, timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(), + properties=self.client.create_properties(self.client.config.user_agent), loop=self.loop) + self.messages_iter = None try: await self._handler.open_async() while not await self._handler.client_ready_async(): await asyncio.sleep(0.05) return True - except errors.TokenExpired as shutdown: + except errors.AuthenticationException as shutdown: log.info("AsyncReceiver disconnected due to token expiry. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = AuthenticationError(str(shutdown), shutdown) await self.close(exception=error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: @@ -205,7 +243,7 @@ async def _reconnect(self): # pylint: disable=too-many-statements log.info("AsyncReceiver detached. Attempting reconnect.") return False log.info("AsyncReceiver detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: @@ -213,7 +251,7 @@ async def _reconnect(self): # pylint: disable=too-many-statements log.info("AsyncReceiver detached. Attempting reconnect.") return False log.info("AsyncReceiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.AMQPConnectionError as shutdown: @@ -221,7 +259,7 @@ async def _reconnect(self): # pylint: disable=too-many-statements log.info("AsyncReceiver couldn't authenticate. Attempting reconnect.") return False log.info("AsyncReceiver connection error (%r). Shutting down.", shutdown) - error = EventHubError(str(shutdown)) + error = ConnectError(str(shutdown)) await self.close(exception=error) raise error except Exception as e: @@ -233,7 +271,7 @@ async def _reconnect(self): # pylint: disable=too-many-statements async def reconnect(self): """If the Receiver was disconnected from the service with a retryable error - attempt to reconnect.""" - while not await self._reconnect_async(): + while not await self._reconnect(): await asyncio.sleep(self.reconnect_backoff) async def close(self, exception=None): @@ -263,13 +301,25 @@ async def close(self, exception=None): elif isinstance(exception, EventHubError): self.error = exception elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = EventHubError(str(exception), exception) + self.error = ConnectError(str(exception), exception) elif exception: self.error = EventHubError(str(exception)) else: self.error = EventHubError("This receive handler is now closed.") await self._handler.close_async() + @property + def queue_size(self): + """ + The current size of the unprocessed Event queue. 
+ + :rtype: int + """ + # pylint: disable=protected-access + if self._handler._received_messages: + return self._handler._received_messages.qsize() + return 0 + async def receive(self, max_batch_size=None, timeout=None): """ Receive events asynchronously from the EventHub. @@ -294,83 +344,41 @@ async def receive(self, max_batch_size=None, timeout=None): """ if self.error: raise self.error - if not self.running: - await self.open() - data_batch = [] - try: - timeout_ms = 1000 * timeout if timeout else 0 - message_batch = await self._handler.receive_message_batch_async( - max_batch_size=max_batch_size, - timeout=timeout_ms) - for message in message_batch: - event_data = EventData(message=message) - self.offset = event_data.offset - data_batch.append(event_data) - return data_batch - except (errors.TokenExpired, errors.AuthenticationException): - log.info("AsyncReceiver disconnected due to token error. Attempting reconnect.") - await self.reconnect() - return data_batch - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") - await self.reconnect() - return data_batch - log.info("AsyncReceiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") - await self.reconnect() - return data_batch - log.info("AsyncReceiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r). 
Shutting down.", e) - error = EventHubError("Receive failed: {}".format(e)) - await self.close(exception=error) - raise error - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.close(exc_val) + await self._open() - def __aiter__(self): - self.messages_iter = self._handler.receive_messages_iter_async() - return self - - async def __anext__(self): + data_batch = [] while True: try: - message = await self.messages_iter.__anext__() - event_data = EventData(message=message) - self.offset = event_data.offset - return event_data + timeout_ms = 1000 * timeout if timeout else 0 + message_batch = await self._handler.receive_message_batch_async( + max_batch_size=max_batch_size, + timeout=timeout_ms) + for message in message_batch: + event_data = EventData(message=message) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch except (errors.TokenExpired, errors.AuthenticationException): - log.info("Receiver disconnected due to token error. Attempting reconnect.") + log.info("AsyncReceiver disconnected due to token error. Attempting reconnect.") await self.reconnect() except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") + log.info("AsyncReceiver detached. Attempting reconnect.") await self.reconnect() - log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + else: + log.info("AsyncReceiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error except errors.MessageHandlerError as shutdown: if self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") + log.info("AsyncReceiver detached. Attempting reconnect.") await self.reconnect() - log.info("Receiver detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + else: + log.info("AsyncReceiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py index 0ef46d519579..e263131ff859 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py @@ -12,7 +12,9 @@ from azure.eventhub import MessageSendResult from azure.eventhub import EventHubError -from azure.eventhub.common import _error_handler, _BatchSendEventData +from azure.eventhub.common import EventData, _BatchSendEventData +from azure.eventhub.error import EventHubError, ConnectError, \ + AuthenticationError, EventDataError, _error_handler log = logging.getLogger(__name__) @@ -33,7 +35,7 @@ class Sender(object): def __init__( # pylint: disable=super-init-not-called self, client, target, partition=None, send_timeout=60, - keep_alive=None, auto_reconnect=False, loop=None): + keep_alive=None, auto_reconnect=True, loop=None): """ Instantiate an EventHub event SenderAsync handler. 
@@ -74,12 +76,12 @@ def __init__( # pylint: disable=super-init-not-called self._handler = SendClientAsync( self.target, auth=self.client.get_auth(), - debug=self.client.debug, + debug=self.client.config.network_tracing, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(), + properties=self.client.create_properties(self.client.config.user_agent), loop=self.loop) self._outcome = None self._condition = None @@ -90,7 +92,7 @@ async def __aenter__(self): async def __aexit__(self, exc_type, exc_val, exc_tb): await self.close(exc_val) - async def open(self): + async def _open(self): """ Open the Sender using the supplied conneciton. If the handler has previously been redirected, the redirect @@ -108,22 +110,48 @@ async def open(self): :caption: Open the Sender using the supplied conneciton. """ - self.running = True if self.redirected: self.target = self.redirected.address self._handler = SendClientAsync( self.target, auth=self.client.get_auth(), - debug=self.client.debug, + debug=self.client.config.network_tracing, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(), + properties=self.client.create_properties(self.client.config.user_agent), loop=self.loop) - await self._handler.open_async() - while not await self._handler.client_ready_async(): - await asyncio.sleep(0.05) + if not self.running: + try: + await self._handler.open_async() + self.running = True + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) + except errors.AuthenticationException: + log.info("Sender failed authentication. Retrying...") + await self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Sender detached. 
Attempting reconnect.") + await self.reconnect() + else: + log.info("Sender detached. Failed to connect") + error = ConnectError(str(shutdown), shutdown) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("Sender couldn't authenticate.", shutdown) + error = AuthenticationError(str(shutdown)) + raise error + else: + log.info("Sender connection error (%r).", shutdown) + error = ConnectError(str(shutdown)) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r)", e) + error = EventHubError("Sender connect failed: {}".format(e)) + raise error async def _reconnect(self): await self._handler.close_async() @@ -131,21 +159,23 @@ async def _reconnect(self): self._handler = SendClientAsync( self.target, auth=self.client.get_auth(), - debug=self.client.debug, + debug=self.client.config.network_tracing, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(), + properties=self.client.create_properties(self.client.config.user_agent), loop=self.loop) try: await self._handler.open_async() + while not await self._handler.client_ready_async(): + await asyncio.sleep(0.05) self._handler.queue_message(*unsent_events) await self._handler.wait_async() return True - except errors.TokenExpired as shutdown: + except errors.AuthenticationException as shutdown: log.info("AsyncSender disconnected due to token expiry. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = AuthenticationError(str(shutdown), shutdown) await self.close(exception=error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: @@ -153,7 +183,7 @@ async def _reconnect(self): log.info("AsyncSender detached. Attempting reconnect.") return False log.info("AsyncSender reconnect failed. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: @@ -161,7 +191,7 @@ async def _reconnect(self): log.info("AsyncSender detached. Attempting reconnect.") return False log.info("AsyncSender reconnect failed. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.AMQPConnectionError as shutdown: @@ -169,7 +199,7 @@ async def _reconnect(self): log.info("AsyncSender couldn't authenticate. Attempting reconnect.") return False log.info("AsyncSender connection error (%r). Shutting down.", shutdown) - error = EventHubError(str(shutdown)) + error = ConnectError(str(shutdown)) await self.close(exception=error) raise error except Exception as e: @@ -211,7 +241,7 @@ async def close(self, exception=None): elif isinstance(exception, EventHubError): self.error = exception elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = EventHubError(str(exception), exception) + self.error = ConnectError(str(exception), exception) elif exception: self.error = EventHubError(str(exception)) else: @@ -219,14 +249,13 @@ async def close(self, exception=None): await self._handler.close_async() async def _send_event_data(self, event_data): - if not self.running: - await self.open() + await self._open() try: self._handler.send_message(event_data.message) if self._outcome != MessageSendResult.Ok: raise Sender._error(self._outcome, self._condition) except errors.MessageException as failed: - error = EventHubError(str(failed), failed) + error = EventDataError(str(failed), failed) await self.close(exception=error) raise error except (errors.TokenExpired, errors.AuthenticationException): @@ -238,7 +267,7 @@ async def _send_event_data(self, event_data): await self.reconnect() else: log.info("Sender 
detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: @@ -247,7 +276,7 @@ async def _send_event_data(self, event_data): await self.reconnect() else: log.info("Sender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except Exception as e: @@ -258,34 +287,14 @@ async def _send_event_data(self, event_data): else: return self._outcome - async def send(self, event_data): - """ - Sends an event data and asynchronously waits until - acknowledgement is received or operation times out. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData - :raises: ~azure.eventhub.common.EventHubError if the message fails to - send. - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_send] - :end-before: [END eventhub_client_async_send] - :language: python - :dedent: 4 - :caption: Sends an event data and asynchronously waits - until acknowledgement is received or operation times out. - - """ - if self.error: - raise self.error - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - event_data.message.on_send_complete = self._on_outcome - await self._send_event_data(event_data) + @staticmethod + def _set_batching_label(event_datas, batching_label): + ed_iter = iter(event_datas) + for ed in ed_iter: + ed._batching_label = batching_label + yield ed - async def send_batch(self, batch_event_data): + async def send(self, event_data, batching_label=None): """ Sends an event data and blocks until acknowledgement is received or operation times out. 
@@ -308,87 +317,16 @@ async def send_batch(self, batch_event_data): """ if self.error: raise self.error - - def verify_partition(event_datas): - ed_iter = iter(event_datas) - try: - ed = next(ed_iter) - partition_key = ed.partition_key - yield ed - except StopIteration: - raise ValueError("batch_event_data must not be empty") - for ed in ed_iter: - if ed.partition_key != partition_key: - raise ValueError("partition key of all EventData must be the same if being sent in a batch") - yield ed - - wrapper_event_data = _BatchSendEventData(verify_partition(batch_event_data)) + if isinstance(event_data, EventData): + if batching_label: + event_data._batching_label = batching_label + wrapper_event_data = event_data + else: + wrapper_event_data = _BatchSendEventData( + self._set_batching_label(event_data, batching_label), + batching_label=batching_label) if batching_label else _BatchSendEventData(event_data) wrapper_event_data.message.on_send_complete = self._on_outcome - return await self._send_event_data(wrapper_event_data) - - def queue_message(self, event_data, callback=None): - """ - Transfers an event data and notifies the callback when the operation is done. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData - :param callback: Callback to be run once the message has been send. - This must be a function that accepts two arguments. - :type callback: callable[~uamqp.constants.MessageSendResult, ~azure.eventhub.common.EventHubError] - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_transfer] - :end-before: [END eventhub_client_transfer] - :language: python - :dedent: 4 - :caption: Transfers an event data and notifies the callback when the operation is done. 
- - """ - if self.error: - raise self.error - if not self.running: - self.open() - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - if callback: - event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c)) - self._handler.queue_message(event_data.message) - - async def send_pending_messages(self): - """ - Wait until all transferred events have been sent. - """ - if self.error: - raise self.error - if not self.running: - raise ValueError("Unable to send until client has been started.") - try: - await self._handler.wait_async() - except (errors.TokenExpired, errors.AuthenticationException): - log.info("AsyncSender disconnected due to token error. Attempting reconnect.") - await self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - await self.reconnect() - else: - log.info("AsyncSender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") - await self.reconnect() - else: - log.info("AsyncSender detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - await self.close(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r).", e) - raise EventHubError("Send failed: {}".format(e)) + await self._send_event_data(wrapper_event_data) def _on_outcome(self, outcome, condition): """ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 58cd975e4baf..8fb7940850e9 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -17,15 +17,17 @@ from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus import uamqp -from uamqp import Message +from uamqp import Message, AMQPClient from uamqp import authentication from uamqp import constants from azure.eventhub import __version__ from azure.eventhub.sender import Sender from azure.eventhub.receiver import Receiver -from azure.eventhub.common import EventHubError, parse_sas_token +from azure.eventhub.common import parse_sas_token +from azure.eventhub.error import EventHubError from .client_abstract import EventHubClientAbstract +from .common import SASTokenCredentials, SharedKeyCredentials log = logging.getLogger(__name__) @@ -59,16 +61,20 @@ def _create_auth(self, username=None, password=None): http_proxy = self.config.http_proxy transport_type = self.config.transport_type auth_timeout = self.config.auth_timeout - if self.aad_credential and self.sas_token: - raise ValueError("Can't have both sas_token and aad_credential") - elif self.aad_credential: - get_jwt_token = functools.partial(self.aad_credential.get_token, ['https://eventhubs.azure.net//.default']) - return authentication.JWTTokenAuth(self.auth_uri, self.auth_uri, - get_jwt_token, http_proxy=http_proxy, - transport_type=transport_type) - elif self.sas_token: - token = self.sas_token() if callable(self.sas_token) else self.sas_token + # TODO: the following 
code can be refactored to create auth from classes directly instead of using if-else + if isinstance(self.credential, SharedKeyCredentials): + username = username or self._auth_config['username'] + password = password or self._auth_config['password'] + if "@sas.root" in username: + return authentication.SASLPlain( + self.host, username, password, http_proxy=http_proxy, transport_type=transport_type) + return authentication.SASTokenAuth.from_shared_access_key( + self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, + transport_type=transport_type) + + elif isinstance(self.credential, SASTokenCredentials): + token = self.credential.get_sas_token() try: expiry = int(parse_sas_token(token)['se']) except (KeyError, TypeError, IndexError): @@ -80,15 +86,15 @@ def _create_auth(self, username=None, password=None): http_proxy=http_proxy, transport_type=transport_type) - username = username or self._auth_config['username'] - password = password or self._auth_config['password'] - if "@sas.root" in username: - return authentication.SASLPlain( - self.address.hostname, username, password, http_proxy=http_proxy, transport_type=transport_type) - return authentication.SASTokenAuth.from_shared_access_key( - self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) + else: # Azure credential + get_jwt_token = functools.partial(self.credential.get_token, + ['https://eventhubs.azure.net//.default']) + return authentication.JWTTokenAuth(self.auth_uri, self.auth_uri, + get_jwt_token, http_proxy=http_proxy, + transport_type=transport_type) + - def get_eventhub_information(self): + def get_properties(self): """ Get details on the specified EventHub. 
Keys in the details dictionary include: @@ -118,20 +124,68 @@ def get_eventhub_information(self): eh_info = response.get_data() output = {} if eh_info: - output['name'] = eh_info[b'name'].decode('utf-8') - output['type'] = eh_info[b'type'].decode('utf-8') - output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) - output['partition_count'] = eh_info[b'partition_count'] + output['path'] = eh_info[b'name'].decode('utf-8') + output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at'])/1000) output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] return output finally: mgmt_client.close() + def get_partition_ids(self): + return self.get_properties()['partition_ids'] + + def get_partition_properties(self, partition): + """ + Get information on the specified partition async. + Keys in the details dictionary include: + + -'name' + -'type' + -'partition' + -'begin_sequence_number' + -'last_enqueued_sequence_number' + -'last_enqueued_offset' + -'last_enqueued_time_utc' + -'is_partition_empty' + + :param partition: The target partition id. 
+ :type partition: str + :rtype: dict + """ + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password": self._auth_config.get("iot_password")} + try: + mgmt_auth = self._create_auth(**alt_creds) + mgmt_client = AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug) + mgmt_client.open() + mgmt_msg = Message(application_properties={'name': self.eh_name, + 'partition': partition}) + response = mgmt_client.mgmt_request( + mgmt_msg, + constants.READ_OPERATION, + op_type=b'com.microsoft:partition', + status_code_field=b'status-code', + description_fields=b'status-description') + partition_info = response.get_data() + output = {} + if partition_info: + output['event_hub_path'] = partition_info[b'name'].decode('utf-8') + # output['type'] = partition_info[b'type'].decode('utf-8') + output['id'] = partition_info[b'partition'].decode('utf-8') + output['beginning_sequence_number'] = partition_info[b'begin_sequence_number'] + output['last_enqueued_sequence_number'] = partition_info[b'last_enqueued_sequence_number'] + output['last_enqueued_offset'] = partition_info[b'last_enqueued_offset'].decode('utf-8') + output['last_enqueued_time_utc'] = datetime.datetime.utcfromtimestamp( + float(partition_info[b'last_enqueued_time_utc'] / 1000)) + output['is_empty'] = partition_info[b'is_partition_empty'] + return output + finally: + mgmt_client.close() + def create_receiver( - self, consumer_group, partition, offset=None, epoch=None, operation=None, + self, partition_id, consumer_group="$Default", event_position=None, exclusive_receiver_priority=None, operation=None, prefetch=None, - keep_alive=None, - auto_reconnect=None, ): """ Add a receiver to the client for a particular consumer group and partition. @@ -140,8 +194,8 @@ def create_receiver( :type consumer_group: str :param partition: The ID of the partition. :type partition: str - :param offset: The offset from which to start receiving. 
- :type offset: ~azure.eventhub.common.Offset + :param event_position: The position from which to start receiving. + :type event_position: ~azure.eventhub.common.EventPosition :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int :operation: An optional operation to be appended to the hostname in the source URL. @@ -158,23 +212,17 @@ def create_receiver( :caption: Add a receiver to the client for a particular consumer group and partition. """ - keep_alive = self.config.keep_alive if keep_alive is None else keep_alive - auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect prefetch = self.config.prefetch if prefetch is None else prefetch path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, path, consumer_group, partition) + self.address.hostname, path, consumer_group, partition_id) handler = Receiver( - self, source_url, offset=offset, epoch=epoch, prefetch=prefetch, keep_alive=keep_alive, auto_reconnect=auto_reconnect) + self, source_url, event_position=event_position, exclusive_receiver_priority=exclusive_receiver_priority, + prefetch=prefetch) return handler - def create_epoch_receiver( - self, consumer_group, partition, epoch, prefetch=300, - operation=None): - return self.create_receiver(consumer_group, partition, epoch=epoch, prefetch=prefetch, operation=operation) - - def create_sender(self, partition=None, operation=None, send_timeout=None, keep_alive=None, auto_reconnect=None): + def create_sender(self, partition_id=None, operation=None, send_timeout=None): """ Add a sender to the client to send EventData object to an EventHub. 
@@ -209,9 +257,7 @@ def create_sender(self, partition=None, operation=None, send_timeout=None, keep_ if operation: target = target + operation send_timeout = self.config.send_timeout if send_timeout is None else send_timeout - keep_alive = self.config.keep_alive if keep_alive is None else keep_alive - auto_reconnect = self.config.auto_reconnect if auto_reconnect is None else auto_reconnect handler = Sender( - self, target, partition=partition, send_timeout=send_timeout, keep_alive=keep_alive, auto_reconnect=auto_reconnect) + self, target, partition=partition_id, send_timeout=send_timeout) return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 1435d15bd2be..26435fd93635 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -5,7 +5,6 @@ from __future__ import unicode_literals import logging -import datetime import sys import uuid import time @@ -17,16 +16,11 @@ except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus -import uamqp -from uamqp import Message -from uamqp import authentication -from uamqp import constants from azure.eventhub import __version__ -from azure.eventhub.sender import Sender -from azure.eventhub.receiver import Receiver -from azure.eventhub.common import EventHubError, parse_sas_token from azure.eventhub.configuration import Configuration +from azure.eventhub import constants +from .common import SASTokenCredentials, SharedKeyCredentials, Address log = logging.getLogger(__name__) @@ -101,8 +95,7 @@ class EventHubClientAbstract(object): """ - def __init__( - self, address, username=None, password=None, sas_token=None, aad_credential=None, **kwargs): + def __init__(self, host, event_hub_path, credential, **kwargs): """ Constructs a new EventHubClient with the given address URL. 
@@ -131,67 +124,29 @@ def __init__( :type sas_token: str or callable """ self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] - self.sas_token = sas_token - self.address = urlparse(address) - self.aad_credential = aad_credential - self.eh_name = self.address.path.lstrip('/') - # self.http_proxy = kwargs.get("http_proxy") + self.address = Address() + self.address.hostname = host + self.address.path = "/" + event_hub_path if event_hub_path else "" + self._auth_config = {} + self.credential = credential + if isinstance(credential, SharedKeyCredentials): + self.username = credential.policy + self.password = credential.key + self._auth_config['username'] = self.username + self._auth_config['password'] = self.password + + self.host = host + self.eh_name = event_hub_path self.keep_alive = kwargs.get("keep_alive", 30) self.auto_reconnect = kwargs.get("auto_reconnect", True) - self.mgmt_target = "amqps://{}/{}".format(self.address.hostname, self.eh_name) - url_username = unquote_plus(self.address.username) if self.address.username else None - username = username or url_username - url_password = unquote_plus(self.address.password) if self.address.password else None - password = password or url_password - if (not username or not password) and not sas_token: - raise ValueError("Please supply either username and password, or a SAS token") + self.mgmt_target = "amqps://{}/{}".format(self.host, self.eh_name) self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) - self._auth_config = {'username': username, 'password': password} self.get_auth = functools.partial(self._create_auth) - # self.debug = kwargs.get("debug", False) # debug - #self.auth_timeout = auth_timeout - - self.stopped = False self.config = Configuration(**kwargs) self.debug = self.config.network_tracing log.info("%r: Created the Event Hub client", self.container_id) - @classmethod - def from_sas_token(cls, address, sas_token, eventhub=None, **kwargs): - """Create an EventHubClient 
from an existing auth token or token generator. - - :param address: The Event Hub address URL - :type address: str - :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, - it will be used to retrieve subsequent tokens in the case of token expiry. The function should - take no arguments. - :type sas_token: str or callable - :param eventhub: The name of the EventHub, if not already included in the address URL. - :type eventhub: str - :param debug: Whether to output network trace logs to the logger. Default - is `False`. - :type debug: bool - :param http_proxy: HTTP proxy settings. This must be a dictionary with the following - keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). - Additionally the following keys may also be present: 'username', 'password'. - :type http_proxy: dict[str, Any] - :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. - The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: int - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_sas_token] - :end-before: [END create_eventhub_client_sas_token] - :language: python - :dedent: 4 - :caption: Create an EventHubClient from an existing auth token or token generator. - - """ - address = _build_uri(address, eventhub) - return cls(address, sas_token=sas_token, **kwargs) - @classmethod def from_connection_string(cls, conn_str, eventhub=None, **kwargs): """Create an EventHubClient from a connection string. 
@@ -223,8 +178,12 @@ def from_connection_string(cls, conn_str, eventhub=None, **kwargs): """ address, policy, key, entity = _parse_conn_str(conn_str) entity = eventhub or entity - address = _build_uri(address, entity) - return cls(address, username=policy, password=key, **kwargs) + left_slash_pos = address.find("//") + if left_slash_pos != -1: + host = address[left_slash_pos + 2:] + else: + host = address + return cls(host, entity, SharedKeyCredentials(policy, key), **kwargs) @classmethod def from_iothub_connection_string(cls, conn_str, **kwargs): @@ -257,7 +216,12 @@ def from_iothub_connection_string(cls, conn_str, **kwargs): hub_name = address.split('.')[0] username = "{}@sas.root.{}".format(policy, hub_name) password = _generate_sas_token(address, policy, key) - client = cls("amqps://" + address, username=username, password=password, **kwargs) + left_slash_pos = address.find("//") + if left_slash_pos != -1: + host = address[left_slash_pos + 2:] + else: + host = address + client = cls(host, "", SharedKeyCredentials(username, password), **kwargs) client._auth_config = { # pylint: disable=protected-access 'iot_username': policy, 'iot_password': key, @@ -265,16 +229,11 @@ def from_iothub_connection_string(cls, conn_str, **kwargs): 'password': password} return client - @classmethod - def from_aad_credential(cls, address, aad_credential, eventhub=None, **kwargs): - address = _build_uri(address, eventhub) - return cls(address, aad_credential=aad_credential, **kwargs) - @abstractmethod def _create_auth(self, username=None, password=None): pass - def create_properties(self): # pylint: disable=no-self-use + def create_properties(self, user_agent=None): # pylint: disable=no-self-use """ Format the properties with which to instantiate the connection. This acts like a user agent over HTTP. 
@@ -286,20 +245,29 @@ def create_properties(self): # pylint: disable=no-self-use properties["version"] = __version__ properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3]) properties["platform"] = sys.platform + + final_user_agent = 'azsdk-python-eventhub/{} ({}; {})'.format( + __version__, properties["framework"], sys.platform) + if user_agent: + final_user_agent = '{}, {}'.format(final_user_agent, user_agent) + + if len(final_user_agent) > constants.MAX_USER_AGENT_LENGTH: + raise ValueError("The user-agent string cannot be more than {} in length." + "Current user_agent string is: {} with length: {}".format( + constants.MAX_USER_AGENT_LENGTH, final_user_agent, len(final_user_agent))) + + properties["user-agent"] = final_user_agent return properties def _process_redirect_uri(self, redirect): redirect_uri = redirect.address.decode('utf-8') auth_uri, _, _ = redirect_uri.partition("/ConsumerGroups") self.address = urlparse(auth_uri) + self.host = self.address.hostname self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) self.eh_name = self.address.path.lstrip('/') self.mgmt_target = redirect_uri - @abstractmethod - def get_eventhub_information(self): - pass - @abstractmethod def create_receiver( self, consumer_group, partition, epoch=None, offset=None, prefetch=300, diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 03a602616c4d..3af21e5d2e86 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -11,41 +11,11 @@ import six -from uamqp import Message, BatchMessage +import uamqp +from uamqp import BatchMessage from uamqp import types, constants, errors from uamqp.message import MessageHeader, MessageProperties -_NO_RETRY_ERRORS = ( - b"com.microsoft:argument-out-of-range", - b"com.microsoft:entity-disabled", - b"com.microsoft:auth-failed", - 
b"com.microsoft:precondition-failed", - b"com.microsoft:argument-error" -) - -def _error_handler(error): - """ - Called internally when an event has failed to send so we - can parse the error to determine whether we should attempt - to retry sending the event again. - Returns the action to take according to error type. - - :param error: The error received in the send attempt. - :type error: Exception - :rtype: ~uamqp.errors.ErrorAction - """ - if error.condition == b'com.microsoft:server-busy': - return errors.ErrorAction(retry=True, backoff=4) - if error.condition == b'com.microsoft:timeout': - return errors.ErrorAction(retry=True, backoff=2) - if error.condition == b'com.microsoft:operation-cancelled': - return errors.ErrorAction(retry=True) - if error.condition == b"com.microsoft:container-close": - return errors.ErrorAction(retry=True, backoff=4) - if error.condition in _NO_RETRY_ERRORS: - return errors.ErrorAction(retry=False) - return errors.ErrorAction(retry=True) - def parse_sas_token(sas_token): """Parse a SAS token into its components. @@ -63,6 +33,9 @@ def parse_sas_token(sas_token): return sas_data +Message = uamqp.Message + + class EventData(object): """ The EventData class is a holder of event content. 
@@ -118,6 +91,26 @@ def __init__(self, body=None, to_device=None, message=None): else: self.message = Message(body, properties=self.msg_properties) + def __str__(self): + dic = { + 'body': self.body_as_str(), + 'application_properties': str(self.application_properties) + } + + if self.sequence_number: + dic['sequence_number'] = str(self.sequence_number) + if self.offset: + dic['offset'] = str(self.offset) + if self.enqueued_time: + dic['enqueued_time'] = str(self.enqueued_time) + if self.device_id: + dic['device_id'] = str(self.device_id) + if self._batching_label: + dic['_batching_label'] = str(self._batching_label) + + + return str(dic) + @property def sequence_number(self): """ @@ -130,9 +123,9 @@ def sequence_number(self): @property def offset(self): """ - The offset of the event data object. + The position of the event data object. - :rtype: ~azure.eventhub.common.Offset + :rtype: ~azure.eventhub.common.EventPosition """ try: return EventPosition(self._annotations[EventData.PROP_OFFSET].decode('UTF-8')) @@ -162,7 +155,7 @@ def device_id(self): return self._annotations.get(EventData.PROP_DEVICE_ID, None) @property - def partition_key(self): + def _batching_label(self): """ The partition key of the event data object. @@ -173,8 +166,8 @@ def partition_key(self): except KeyError: return self._annotations.get(EventData.PROP_PARTITION_KEY, None) - @partition_key.setter - def partition_key(self, value): + @_batching_label.setter + def _batching_label(self, value): """ Set the partition key of the event data object. 
@@ -189,6 +182,7 @@ def partition_key(self, value): self.message.header = header self._annotations = annotations + @property def application_properties(self): """ @@ -262,9 +256,20 @@ def encode_message(self): class _BatchSendEventData(EventData): - def __init__(self, batch_event_data): - # TODO: rethink if to_device should be included in - self.message = BatchMessage(data=batch_event_data, multi_messages=True, properties=None) + def __init__(self, batch_event_data, batching_label=None): + self.message = BatchMessage(data=batch_event_data, multi_messages=False, properties=None) + self.set_batching_label(batching_label) + + def set_batching_label(self, value): + if value: + annotations = self.message.annotations + if annotations is None: + annotations = dict() + annotations[types.AMQPSymbol(EventData.PROP_PARTITION_KEY)] = value + header = MessageHeader() + header.durable = True + self.message.annotations = annotations + self.message.header = header class EventPosition(object): @@ -294,9 +299,12 @@ def __init__(self, value, inclusive=False): :param inclusive: Whether to include the supplied value as the start point. :type inclusive: bool """ - self.value = value + self.value = value if value is not None else "-1" self.inclusive = inclusive + def __str__(self): + return str(self.value) + def selector(self): """ Creates a selector expression of the offset. 
@@ -312,12 +320,12 @@ def selector(self): return ("amqp.annotation.x-opt-offset {} '{}'".format(operator, self.value)).encode('utf-8') @staticmethod - def from_start_of_stream(): - return EventPosition("-1") + def first_available(): + return FIRST_AVAILABLE - @staticmethod - def from_end_of_stream(): - return EventPosition("@latest") + @classmethod + def new_events_only(cls): + return NEW_EVENTS_ONLY @staticmethod def from_offset(offset, inclusive=False): @@ -332,53 +340,29 @@ def from_enqueued_time(enqueued_time, inclusive=False): return EventPosition(enqueued_time, inclusive) -class EventHubError(Exception): - """ - Represents an error happened in the client. - - :ivar message: The error message. - :vartype message: str - :ivar error: The error condition, if available. - :vartype error: str - :ivar details: The error details, if included in the - service response. - :vartype details: dict[str, str] - """ +FIRST_AVAILABLE = EventPosition("-1") +NEW_EVENTS_ONLY = EventPosition("@latest") + + +# TODO: move some behaviors to these two classes. 
+class SASTokenCredentials(object): + def __init__(self, token): + self.token = token + + def get_sas_token(self): + if callable(self.token): + return self.token() + else: + return self.token + + +class SharedKeyCredentials(object): + def __init__(self, policy, key): + self.policy = policy + self.key = key + - def __init__(self, message, details=None): - self.error = None - self.message = message - self.details = details - if isinstance(message, constants.MessageSendResult): - self.message = "Message send failed with result: {}".format(message) - if details and isinstance(details, Exception): - try: - condition = details.condition.value.decode('UTF-8') - except AttributeError: - condition = details.condition.decode('UTF-8') - _, _, self.error = condition.partition(':') - self.message += "\nError: {}".format(self.error) - try: - self._parse_error(details.description) - for detail in self.details: - self.message += "\n{}".format(detail) - except: # pylint: disable=bare-except - self.message += "\n{}".format(details) - super(EventHubError, self).__init__(self.message) - - def _parse_error(self, error_list): - details = [] - self.message = error_list if isinstance(error_list, six.text_type) else error_list.decode('UTF-8') - details_index = self.message.find(" Reference:") - if details_index >= 0: - details_msg = self.message[details_index + 1:] - self.message = self.message[0:details_index] - - tracking_index = details_msg.index(", TrackingId:") - system_index = details_msg.index(", SystemTracker:") - timestamp_index = details_msg.index(", Timestamp:") - details.append(details_msg[:tracking_index]) - details.append(details_msg[tracking_index + 2: system_index]) - details.append(details_msg[system_index + 2: timestamp_index]) - details.append(details_msg[timestamp_index + 2:]) - self.details = details +class Address(object): + def __init__(self, hostname=None, path=None): + self.hostname = hostname + self.path = path diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index 2d7a7be57638..b6e030c9e3a6 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -3,19 +3,19 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -from uamqp.constants import TransportType +from .constants import TransportType class Configuration(object): def __init__(self, **kwargs): self.user_agent = kwargs.get("user_agent") self.max_retries = kwargs.get("max_retries", 3) - self.network_tracing = kwargs.get("debug", False) + self.network_tracing = kwargs.get("network_tracing", False) self.http_proxy = kwargs.get("http_proxy") - self.auto_reconnect = kwargs.get("auto_reconnect", False) - self.keep_alive = kwargs.get("keep_alive", 0) self.transport_type = TransportType.AmqpOverWebsocket if self.http_proxy \ else kwargs.get("transport_type", TransportType.Amqp) self.auth_timeout = kwargs.get("auth_timeout", 60) - self.prefetch = kwargs.get("prefetch") + self.prefetch = kwargs.get("prefetch", 300) + self.max_batch_size = kwargs.get("max_batch_size") + self.receive_timeout = kwargs.get("receive_timeout", 0) self.send_timeout = kwargs.get("send_timeout", 60) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py new file mode 100644 index 000000000000..e71d3815f48f --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py @@ -0,0 +1,11 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +from uamqp import constants + +MAX_USER_AGENT_LENGTH = 512 +TransportType = constants.TransportType +MessageSendResult = constants.MessageSendResult diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py new file mode 100644 index 000000000000..69aaa701496b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -0,0 +1,108 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from uamqp import types, constants, errors +import six +from azure.core import AzureError + +_NO_RETRY_ERRORS = ( + b"com.microsoft:argument-out-of-range", + b"com.microsoft:entity-disabled", + b"com.microsoft:auth-failed", + b"com.microsoft:precondition-failed", + b"com.microsoft:argument-error" +) + +def _error_handler(error): + """ + Called internally when an event has failed to send so we + can parse the error to determine whether we should attempt + to retry sending the event again. + Returns the action to take according to error type. + + :param error: The error received in the send attempt. 
+ :type error: Exception + :rtype: ~uamqp.errors.ErrorAction + """ + if error.condition == b'com.microsoft:server-busy': + return errors.ErrorAction(retry=True, backoff=4) + if error.condition == b'com.microsoft:timeout': + return errors.ErrorAction(retry=True, backoff=2) + if error.condition == b'com.microsoft:operation-cancelled': + return errors.ErrorAction(retry=True) + if error.condition == b"com.microsoft:container-close": + return errors.ErrorAction(retry=True, backoff=4) + if error.condition in _NO_RETRY_ERRORS: + return errors.ErrorAction(retry=False) + return errors.ErrorAction(retry=True) + + +class EventHubError(AzureError): + """ + Represents an error happened in the client. + + :ivar message: The error message. + :vartype message: str + :ivar error: The error condition, if available. + :vartype error: str + :ivar details: The error details, if included in the + service response. + :vartype details: dict[str, str] + """ + + def __init__(self, message, details=None): + self.error = None + self.message = message + self.details = details + if isinstance(message, constants.MessageSendResult): + self.message = "Message send failed with result: {}".format(message) + if details and isinstance(details, Exception): + try: + condition = details.condition.value.decode('UTF-8') + except AttributeError: + try: + condition = details.condition.decode('UTF-8') + except AttributeError: + condition = None + if condition: + _, _, self.error = condition.partition(':') + self.message += "\nError: {}".format(self.error) + try: + self._parse_error(details.description) + for detail in self.details: + self.message += "\n{}".format(detail) + except: # pylint: disable=bare-except + self.message += "\n{}".format(details) + super(EventHubError, self).__init__(self.message) + + def _parse_error(self, error_list): + details = [] + self.message = error_list if isinstance(error_list, six.text_type) else error_list.decode('UTF-8') + details_index = self.message.find(" Reference:") + if 
details_index >= 0: + details_msg = self.message[details_index + 1:] + self.message = self.message[0:details_index] + + tracking_index = details_msg.index(", TrackingId:") + system_index = details_msg.index(", SystemTracker:") + timestamp_index = details_msg.index(", Timestamp:") + details.append(details_msg[:tracking_index]) + details.append(details_msg[tracking_index + 2: system_index]) + details.append(details_msg[system_index + 2: timestamp_index]) + details.append(details_msg[timestamp_index + 2:]) + self.details = details + + +class AuthenticationError(EventHubError): + pass + + +class ConnectError(EventHubError): + pass + + +class EventDataError(EventHubError): + pass + diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index 4577d0332af5..4643cb29419f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -11,7 +11,8 @@ from uamqp import types, errors from uamqp import ReceiveClient, Source -from azure.eventhub.common import EventHubError, EventData, _error_handler +from azure.eventhub.common import EventData +from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, _error_handler log = logging.getLogger(__name__) @@ -33,7 +34,7 @@ class Receiver(object): timeout = 0 _epoch = b'com.microsoft:epoch' - def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_alive=None, auto_reconnect=True): + def __init__(self, client, source, event_position=None, prefetch=300, exclusive_receiver_priority=None, keep_alive=None, auto_reconnect=True): """ Instantiate a receiver. 
@@ -50,10 +51,10 @@ def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_a self.running = False self.client = client self.source = source - self.offset = offset - self.iter_started = False + self.offset = event_position + self.messages_iter = None self.prefetch = prefetch - self.epoch = epoch + self.exclusive_receiver_priority = exclusive_receiver_priority self.keep_alive = keep_alive self.auto_reconnect = auto_reconnect self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) @@ -66,19 +67,19 @@ def __init__(self, client, source, offset=None, prefetch=300, epoch=None, keep_a source = Source(self.source) if self.offset is not None: source.set_filter(self.offset.selector()) - if epoch: - self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + if exclusive_receiver_priority: + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(exclusive_receiver_priority))} self._handler = ReceiveClient( source, auth=self.client.get_auth(), - debug=self.client.debug, + debug=self.client.config.network_tracing, prefetch=self.prefetch, link_properties=self.properties, timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties()) + properties=self.client.create_properties(self.client.config.user_agent)) def __enter__(self): return self @@ -87,16 +88,14 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.close(exc_val) def __iter__(self): - if not self.running: - self.open() - if not self.iter_started: - self.iter_started = True - self.messages_iter = self._handler.receive_messages_iter() return self def __next__(self): + self._open() while True: try: + if not self.messages_iter: + self.messages_iter = self._handler.receive_messages_iter() message = next(self.messages_iter) event_data = EventData(message=message) self.offset = event_data.offset @@ -108,25 +107,29 @@ def 
__next__(self): if shutdown.action.retry and self.auto_reconnect: log.info("Receiver detached. Attempting reconnect.") self.reconnect() - log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - self.close(exception=error) - raise error + else: + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error except errors.MessageHandlerError as shutdown: if self.auto_reconnect: log.info("Receiver detached. Attempting reconnect.") self.reconnect() - log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - self.close(exception=error) - raise error + else: + log.info("Receiver detached. Shutting down.") + error = EventHubError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except StopIteration: + raise except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) self.close(exception=error) raise error - def open(self): + def _open(self): """ Open the Receiver using the supplied conneciton. 
If the handler has previously been redirected, the redirect @@ -145,7 +148,6 @@ def open(self): """ # pylint: disable=protected-access - self.running = True if self.redirected: self.source = self.redirected.address source = Source(self.source) @@ -157,17 +159,45 @@ def open(self): self._handler = ReceiveClient( source, auth=self.client.get_auth(**alt_creds), - debug=self.client.debug, + debug=self.client.config.network_tracing, prefetch=self.prefetch, link_properties=self.properties, timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties()) - self._handler.open() - while not self._handler.client_ready(): - time.sleep(0.05) + properties=self.client.create_properties(self.client.config.user_agent)) + if not self.running: + try: + self._handler.open() + self.running = True + while not self._handler.client_ready(): + time.sleep(0.05) + + except errors.AuthenticationException: + log.info("Receiver failed authentication. Retrying...") + self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Receiver detached. 
Failed to connect") + error = ConnectError(str(shutdown), shutdown) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("Receiver couldn't authenticate (%r).", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + raise error + else: + log.info("Receiver connection error (%r).", shutdown) + error = ConnectError(str(shutdown), shutdown) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r)", e) + error = EventHubError("Receiver connect failed: {}".format(e)) + raise error def _reconnect(self): # pylint: disable=too-many-statements # pylint: disable=protected-access @@ -181,22 +211,23 @@ def _reconnect(self): # pylint: disable=too-many-statements self._handler = ReceiveClient( source, auth=self.client.get_auth(**alt_creds), - debug=self.client.debug, + debug=self.client.config.network_tracing, prefetch=self.prefetch, link_properties=self.properties, timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties()) + properties=self.client.create_properties(self.client.config.user_agent)) + self.messages_iter = None try: self._handler.open() while not self._handler.client_ready(): time.sleep(0.05) return True - except errors.TokenExpired as shutdown: + except errors.AuthenticationException as shutdown: log.info("Receiver disconnected due to token expiry. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = AuthenticationError(str(shutdown), shutdown) self.close(exception=error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: @@ -204,7 +235,7 @@ def _reconnect(self): # pylint: disable=too-many-statements log.info("Receiver detached. Attempting reconnect.") return False log.info("Receiver detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: @@ -212,7 +243,7 @@ def _reconnect(self): # pylint: disable=too-many-statements log.info("Receiver detached. Attempting reconnect.") return False log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.AMQPConnectionError as shutdown: @@ -220,7 +251,7 @@ def _reconnect(self): # pylint: disable=too-many-statements log.info("Receiver couldn't authenticate. Attempting reconnect.") return False log.info("Receiver connection error (%r). Shutting down.", shutdown) - error = EventHubError(str(shutdown)) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except Exception as e: @@ -235,38 +266,6 @@ def reconnect(self): while not self._reconnect(): time.sleep(self.reconnect_backoff) - def get_handler_state(self): - """ - Get the state of the underlying handler with regards to start - up processes. - - :rtype: ~uamqp.constants.MessageReceiverState - """ - # pylint: disable=protected-access - return self._handler._message_receiver.get_state() - - def has_started(self): - """ - Whether the handler has completed all start up processes such as - establishing the connection, session, link and authentication, and - is not ready to process messages. 
- **This function is now deprecated and will be removed in v2.0+.** - - :rtype: bool - """ - # pylint: disable=protected-access - timeout = False - auth_in_progress = False - if self._handler._connection.cbs: - timeout, auth_in_progress = self._handler._auth.handle_token() - if timeout: - raise EventHubError("Authorization timeout.") - if auth_in_progress: - return False - if not self._handler._client_ready(): - return False - return True - def close(self, exception=None): """ Close down the handler. If the handler has already closed, @@ -334,43 +333,43 @@ def receive(self, max_batch_size=None, timeout=None): """ if self.error: raise self.error - if not self.running: - self.open() + self._open() + data_batch = [] - try: - timeout_ms = 1000 * timeout if timeout else 0 - message_batch = self._handler.receive_message_batch( - max_batch_size=max_batch_size, - timeout=timeout_ms) - for message in message_batch: - event_data = EventData(message=message) - self.offset = event_data.offset - data_batch.append(event_data) - return data_batch - except (errors.TokenExpired, errors.AuthenticationException): - log.info("Receiver disconnected due to token error. Attempting reconnect.") - self.reconnect() - return data_batch - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - self.reconnect() + while True: + try: + timeout_ms = 1000 * timeout if timeout else 0 + message_batch = self._handler.receive_message_batch( + max_batch_size=max_batch_size, + timeout=timeout_ms) + for message in message_batch: + event_data = EventData(message=message) + self.offset = event_data.offset + data_batch.append(event_data) return data_batch - log.info("Receiver detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - self.close(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") + except (errors.TokenExpired, errors.AuthenticationException): + log.info("Receiver disconnected due to token error. Attempting reconnect.") self.reconnect() - return data_batch - log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - self.close(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Receive failed: {}".format(e)) - self.close(exception=error) - raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Receiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if self.auto_reconnect: + log.info("Receiver detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Receiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r). 
Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e)) + self.close(exception=error) + raise error diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index ab113eac1c28..ccb193835c20 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -12,7 +12,9 @@ from uamqp import SendClient from uamqp.constants import MessageSendResult -from azure.eventhub.common import EventHubError, EventData, _BatchSendEventData, _error_handler +from azure.eventhub.common import EventData, _BatchSendEventData +from azure.eventhub.error import EventHubError, ConnectError, \ + AuthenticationError, EventDataError, _error_handler log = logging.getLogger(__name__) @@ -70,12 +72,12 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N self._handler = SendClient( self.target, auth=self.client.get_auth(), - debug=self.client.debug, + debug=self.client.config.network_tracing, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties()) + properties=self.client.create_properties(self.client.config.user_agent)) self._outcome = None self._condition = None @@ -85,7 +87,7 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): self.close(exc_val) - def open(self): + def _open(self): """ Open the Sender using the supplied conneciton. If the handler has previously been redirected, the redirect @@ -103,21 +105,47 @@ def open(self): :caption: Open the Sender using the supplied conneciton. 
""" - self.running = True if self.redirected: self.target = self.redirected.address self._handler = SendClient( self.target, auth=self.client.get_auth(), - debug=self.client.debug, + debug=self.client.config.network_tracing, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties()) - self._handler.open() - while not self._handler.client_ready(): - time.sleep(0.05) + properties=self.client.create_properties(self.client.config.user_agent)) + if not self.running: + try: + self._handler.open() + self.running = True + while not self._handler.client_ready(): + time.sleep(0.05) + except errors.AuthenticationException: + log.info("Sender failed authentication. Retrying...") + self.reconnect() + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry and self.auto_reconnect: + log.info("Sender detached. Attempting reconnect.") + self.reconnect() + else: + log.info("Sender detached. 
Failed to connect") + error = ConnectError(str(shutdown), shutdown) + raise error + except errors.AMQPConnectionError as shutdown: + if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: + log.info("Sender couldn't authenticate.", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + raise error + else: + log.info("Sender connection error (%r).", shutdown) + error = ConnectError(str(shutdown), shutdown) + raise error + except Exception as e: + log.info("Unexpected error occurred (%r)", e) + error = EventHubError("Sender connect failed: {}".format(e)) + raise error def _reconnect(self): # pylint: disable=protected-access @@ -126,20 +154,22 @@ def _reconnect(self): self._handler = SendClient( self.target, auth=self.client.get_auth(), - debug=self.client.debug, + debug=self.client.config.network_tracing, msg_timeout=self.timeout, error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties()) + properties=self.client.create_properties(self.client.config.user_agent)) try: self._handler.open() + while not self._handler.client_ready(): + time.sleep(0.05) self._handler.queue_message(*unsent_events) self._handler.wait() return True - except errors.TokenExpired as shutdown: + except errors.AuthenticationException as shutdown: log.info("Sender disconnected due to token expiry. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = AuthenticationError(str(shutdown), shutdown) self.close(exception=error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: @@ -147,7 +177,7 @@ def _reconnect(self): log.info("Sender detached. Attempting reconnect.") return False log.info("Sender reconnect failed. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: @@ -155,7 +185,7 @@ def _reconnect(self): log.info("Sender detached. Attempting reconnect.") return False log.info("Sender reconnect failed. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.AMQPConnectionError as shutdown: @@ -163,7 +193,7 @@ def _reconnect(self): log.info("Sender couldn't authenticate. Attempting reconnect.") return False log.info("Sender connection error (%r). Shutting down.", shutdown) - error = EventHubError(str(shutdown)) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except Exception as e: @@ -178,16 +208,6 @@ def reconnect(self): while not self._reconnect(): time.sleep(self.reconnect_backoff) - def get_handler_state(self): - """ - Get the state of the underlying handler with regards to start - up processes. - - :rtype: ~uamqp.constants.MessageSenderState - """ - # pylint: disable=protected-access - return self._handler._message_sender.get_state() - def close(self, exception=None): """ Close down the handler. 
If the handler has already closed, @@ -221,14 +241,14 @@ def close(self, exception=None): self._handler.close() def _send_event_data(self, event_data): - if not self.running: - self.open() + self._open() + try: self._handler.send_message(event_data.message) if self._outcome != MessageSendResult.Ok: raise Sender._error(self._outcome, self._condition) except errors.MessageException as failed: - error = EventHubError(str(failed), failed) + error = EventDataError(str(failed), failed) self.close(exception=error) raise error except (errors.TokenExpired, errors.AuthenticationException): @@ -240,7 +260,7 @@ def _send_event_data(self, event_data): self.reconnect() else: log.info("Sender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: @@ -249,7 +269,7 @@ def _send_event_data(self, event_data): self.reconnect() else: log.info("Sender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except Exception as e: @@ -260,35 +280,14 @@ def _send_event_data(self, event_data): else: return self._outcome - def send(self, event_data): - """ - Sends an event data and blocks until acknowledgement is - received or operation times out. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData - :raises: ~azure.eventhub.common.EventHubError if the message fails to - send. - :return: The outcome of the message send. - :rtype: ~uamqp.constants.MessageSendResult - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_sync_send] - :end-before: [END eventhub_client_sync_send] - :language: python - :dedent: 4 - :caption: Sends an event data and blocks until acknowledgement is received or operation times out. 
- - """ - if self.error: - raise self.error - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - event_data.message.on_send_complete = self._on_outcome - return self._send_event_data(event_data) + @staticmethod + def _set_batching_label(event_datas, batching_label): + ed_iter = iter(event_datas) + for ed in ed_iter: + ed._batching_label = batching_label + yield ed - def send_batch(self, batch_event_data): + def send(self, event_data, batching_label=None): """ Sends an event data and blocks until acknowledgement is received or operation times out. @@ -311,96 +310,16 @@ def send_batch(self, batch_event_data): """ if self.error: raise self.error - - def verify_partition(event_datas): - ed_iter = iter(event_datas) - try: - ed = next(ed_iter) - partition_key = ed.partition_key - yield ed - except StopIteration: - raise ValueError("batch_event_data must not be empty") - for ed in ed_iter: - if ed.partition_key != partition_key: - raise ValueError("partition key of all EventData must be the same if being sent in a batch") - yield ed - - wrapper_event_data = _BatchSendEventData(verify_partition(batch_event_data)) + if isinstance(event_data, EventData): + if batching_label: + event_data._batching_label = batching_label + wrapper_event_data = event_data + else: + wrapper_event_data = _BatchSendEventData( + self._set_batching_label(event_data, batching_label), + batching_label=batching_label) if batching_label else _BatchSendEventData(event_data) wrapper_event_data.message.on_send_complete = self._on_outcome - return self._send_event_data(wrapper_event_data) - - def queue_message(self, event_data, callback=None): - """ - Transfers an event data and notifies the callback when the operation is done. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData - :param callback: Callback to be run once the message has been send. 
- This must be a function that accepts two arguments. - :type callback: callable[~uamqp.constants.MessageSendResult, ~azure.eventhub.common.EventHubError] - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_transfer] - :end-before: [END eventhub_client_transfer] - :language: python - :dedent: 4 - :caption: Transfers an event data and notifies the callback when the operation is done. - - """ - if self.error: - raise self.error - if not self.running: - self.open() - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - if callback: - event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c)) - self._handler.queue_message(event_data.message) - - def send_pending_messages(self): - """ - Wait until all transferred events have been sent. - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_transfer] - :end-before: [END eventhub_client_transfer] - :language: python - :dedent: 4 - :caption: Wait until all transferred events have been sent. - - """ - if self.error: - raise self.error - if not self.running: - self.open() - try: - self._handler.wait() - except (errors.TokenExpired, errors.AuthenticationException): - log.info("Sender disconnected due to token error. Attempting reconnect.") - self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - self.reconnect() - else: - log.info("Sender detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - self.close(exception=error) - raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - self.reconnect() - else: - log.info("Sender detached. 
Shutting down.") - error = EventHubError(str(shutdown), shutdown) - self.close(exception=error) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r).", e) - raise EventHubError("Send failed: {}".format(e)) + self._send_event_data(wrapper_event_data) def _on_outcome(self, outcome, condition): """ diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index ce6f83adc6af..68a211917f4c 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -165,13 +165,11 @@ def device_id(): @pytest.fixture() def connstr_receivers(connection_str): client = EventHubClient.from_connection_string(connection_str, debug=False) - eh_hub_info = client.get_eventhub_information() - partitions = eh_hub_info["partition_ids"] - - recv_offset = EventPosition("@latest") + partitions = client.get_partition_ids() receivers = [] for p in partitions: - receiver = client.create_receiver("$default", p, prefetch=500, offset=EventPosition("@latest")) + #receiver = client.create_receiver(partition_id=p, prefetch=500, event_position=EventPosition("@latest")) + receiver = client.create_receiver(partition_id=p, prefetch=500, event_position=EventPosition("-1")) receivers.append(receiver) receiver.receive(timeout=1) yield connection_str, receivers @@ -183,12 +181,11 @@ def connstr_receivers(connection_str): @pytest.fixture() def connstr_senders(connection_str): client = EventHubClient.from_connection_string(connection_str, debug=True) - eh_hub_info = client.get_eventhub_information() - partitions = eh_hub_info["partition_ids"] + partitions = client.get_partition_ids() senders = [] for p in partitions: - sender = client.create_sender(partition=p) + sender = client.create_sender(partition_id=p) senders.append(sender) yield connection_str, senders for s in senders: diff --git a/sdk/eventhub/azure-eventhubs/examples/proxy.py b/sdk/eventhub/azure-eventhubs/examples/proxy.py new file mode 100644 index 
000000000000..b4a2d51b0411 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/proxy.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +""" +An example to show sending and receiving events behind a proxy +""" +import os +import logging + +from azure.eventhub import EventHubClient, EventPosition, EventData + +import examples +logger = examples.get_logger(logging.INFO) + + +# Address can be in either of these formats: +# "amqps://:@.servicebus.windows.net/myeventhub" +# "amqps://.servicebus.windows.net/myeventhub" +ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') + +# SAS policy and key are not required if they are encoded in the URL +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') +CONSUMER_GROUP = "$default" +EVENT_POSITION = EventPosition.first_available() +PARTITION = "0" +HTTP_PROXY = { + 'proxy_hostname': '127.0.0.1', # proxy hostname + 'proxy_port': 3128, # proxy port + 'username': 'admin', # username used for proxy authentication if needed + 'password': '123456' # password used for proxy authentication if needed +} + + +if not ADDRESS: + raise ValueError("No EventHubs URL supplied.") + +client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY, http_proxy=HTTP_PROXY) +sender = client.create_sender(partition_id=PARTITION) +receiver = client.create_receiver(consumer_group=CONSUMER_GROUP, partition_id=PARTITION, event_position=EVENT_POSITION) +try: + event_list = [] + for i in range(20): + event_list.append(EventData("Event Number {}".format(i))) + + print('Start sending events behind a proxy.') + + with sender: + sender.send(event_list) + + print('Start receiving events behind a proxy.') + + with receiver: + received = receiver.receive(max_batch_size=50, timeout=5) + +except KeyboardInterrupt: + pass + diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 1fdb12ec33d8..6f6cf3399021 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py 
@@ -8,6 +8,7 @@ import re import os.path +import sys from io import open from setuptools import find_packages, setup @@ -34,6 +35,22 @@ with open('HISTORY.rst') as f: history = f.read() +exclude_packages = [ + 'tests', + "tests.asynctests", + 'examples', + # Exclude packages that will be covered by PEP420 or nspkg + 'azure', + '*.eventprocessorhost', + '*.eventprocessorhost.*' + ] + +if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 5): + exclude_packages.extend([ + '*.aio', + '*.aio.*' + ]) + setup( name=PACKAGE_NAME, version=version, @@ -44,28 +61,25 @@ author_email='azpysdkhelp@microsoft.com', url='https://github.com/Azure/azure-sdk-for-python', classifiers=[ - 'Development Status :: 5 - Production/Stable', + 'Development Status :: 3 - Alpha', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', + # 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'License :: OSI Approved :: MIT License', ], zip_safe=False, - packages=find_packages(exclude=[ - "azure", - "examples", - "tests", - "tests.asynctests"]), + packages=find_packages(exclude=exclude_packages), install_requires=[ 'uamqp~=1.2.0', 'msrestazure>=0.4.32,<2.0.0', 'azure-common~=1.1', - 'azure-storage-blob~=1.3' + 'azure-storage-blob~=1.3', + 'azure-core~=1.0', ], extras_require={ ":python_version<'3.0'": ['azure-nspkg'], diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index a8bc39757d87..fdb5c1ffea35 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -9,43 +9,37 @@ import pytest import time 
-from azure import eventhub -from azure.eventhub import EventData, Offset, EventHubError, EventHubClientAsync +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData, EventPosition, EventHubError async def pump(receiver, sleep=None): messages = 0 if sleep: await asyncio.sleep(sleep) - batch = await receiver.receive(timeout=1) - messages += len(batch) + async with receiver: + batch = await receiver.receive(timeout=1) + messages += len(batch) return messages async def get_partitions(iot_connection_str): - try: - client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True) - client.add_async_receiver("$default", "0", prefetch=1000, operation='/messages/events') - await client.run_async() - partitions = await client.get_eventhub_info_async() + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + receiver = client.create_receiver(partition_id="0", prefetch=1000, operation='/messages/events') + async with receiver: + partitions = await client.get_properties() return partitions["partition_ids"] - finally: - await client.stop_async() @pytest.mark.liveTest @pytest.mark.asyncio async def test_iothub_receive_multiple_async(iot_connection_str): partitions = await get_partitions(iot_connection_str) - client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True) - try: - receivers = [] - for p in partitions: - receivers.append(client.add_async_receiver("$default", p, prefetch=10, operation='/messages/events')) - await client.run_async() - outputs = await asyncio.gather(*[pump(r) for r in receivers]) - - assert isinstance(outputs[0], int) and outputs[0] <= 10 - assert isinstance(outputs[1], int) and outputs[1] <= 10 - finally: - await client.stop_async() + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + receivers = [] + for p in partitions: + receivers.append(client.create_receiver(partition_id=p, 
prefetch=10, operation='/messages/events')) + outputs = await asyncio.gather(*[pump(r) for r in receivers]) + + assert isinstance(outputs[0], int) and outputs[0] <= 10 + assert isinstance(outputs[1], int) and outputs[1] <= 10 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py index 9a51d067e312..78611b3bf2ef 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py @@ -13,7 +13,8 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventHubClientAsync, EventData +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData from azure.eventprocessorhost import ( AbstractEventProcessor, AzureStorageCheckpointLeaseManager, @@ -123,13 +124,14 @@ async def pump(pid, sender, duration): total = 0 try: - while time.time() < deadline: - data = EventData(body=b"D" * 512) - sender.transfer(data) - total += 1 - if total % 100 == 0: - await sender.wait_async() - #logger.info("{}: Send total {}".format(pid, total)) + async with sender: + while time.time() < deadline: + data = EventData(body=b"D" * 512) + sender.queue_message(data) + total += 1 + if total % 100 == 0: + await sender.send_pending_messages() + #logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) raise @@ -164,14 +166,13 @@ def test_long_running_eph(live_eventhub): live_eventhub['key_name'], live_eventhub['access_key'], live_eventhub['event_hub']) - send_client = EventHubClientAsync.from_connection_string(conn_str) + send_client = EventHubClient.from_connection_string(conn_str) pumps = [] for pid in ["0", "1"]: - sender = send_client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) + sender = send_client.create_sender(partition_id=pid, send_timeout=0, 
keep_alive=False) pumps.append(pump(pid, sender, 15)) - loop.run_until_complete(send_client.run_async()) results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) - loop.run_until_complete(send_client.stop_async()) + assert not any(results) # Eventhub config and storage manager @@ -198,7 +199,7 @@ def test_long_running_eph(live_eventhub): EventProcessor, eh_config, storage_manager, - ep_params=["param1","param2"], + ep_params=["param1", "param2"], eph_options=eh_options, loop=loop) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py index 3c926dd77470..7b4a9021db1d 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py @@ -13,7 +13,8 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventHubClientAsync, EventData +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData from azure.eventprocessorhost import ( AbstractEventProcessor, AzureStorageCheckpointLeaseManager, @@ -128,13 +129,14 @@ async def pump(pid, sender, duration): total = 0 try: - while time.time() < deadline: - data = EventData(body=b"D" * 512) - sender.transfer(data) - total += 1 - if total % 100 == 0: - await sender.wait_async() - #logger.info("{}: Send total {}".format(pid, total)) + async with sender: + while time.time() < deadline: + data = EventData(body=b"D" * 512) + sender.queue_message(data) + total += 1 + if total % 100 == 0: + await sender.send_pending_messages() + #logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) raise @@ -169,14 +171,12 @@ def test_long_running_context_eph(live_eventhub): live_eventhub['key_name'], live_eventhub['access_key'], 
live_eventhub['event_hub']) - send_client = EventHubClientAsync.from_connection_string(conn_str) + send_client = EventHubClient.from_connection_string(conn_str) pumps = [] for pid in ["0", "1"]: - sender = send_client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) + sender = send_client.create_sender(partition_id=pid, send_timeout=0) pumps.append(pump(pid, sender, 15)) - loop.run_until_complete(send_client.run_async()) results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) - loop.run_until_complete(send_client.stop_async()) assert not any(results) # Eventhub config and storage manager @@ -223,4 +223,4 @@ def test_long_running_context_eph(live_eventhub): config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] config['consumer_group'] = "$Default" config['partition'] = "0" - test_long_running_eph(config) + test_long_running_context_eph(config) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py index b3e7dca8a2dc..1f50144674b8 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py @@ -18,8 +18,8 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import Offset -from azure.eventhub import EventHubClientAsync +from azure.eventhub import EventPosition +from azure.eventhub.aio import EventHubClient def get_logger(filename, level=logging.INFO): @@ -48,7 +48,7 @@ def get_logger(filename, level=logging.INFO): async def get_partitions(client): - eh_data = await client.get_eventhub_info_async() + eh_data = await client.get_properties() return eh_data["partition_ids"] @@ -56,34 +56,37 @@ async def pump(_pid, receiver, _args, _dl): total = 0 iteration = 0 deadline = time.time() + _dl + try: - while time.time() < deadline: - batch = await 
receiver.receive(timeout=1) - size = len(batch) - total += size - iteration += 1 - if size == 0: - print("{}: No events received, queue size {}, delivered {}".format( - _pid, - receiver.queue_size, - total)) - elif iteration >= 5: - iteration = 0 - print("{}: total received {}, last sn={}, last offset={}".format( - _pid, - total, - batch[-1].sequence_number, - batch[-1].offset.value)) - print("{}: total received {}".format( - _pid, - total)) + async with receiver: + while time.time() < deadline: + batch = await receiver.receive(timeout=1) + size = len(batch) + total += size + iteration += 1 + if size == 0: + print("{}: No events received, queue size {}, delivered {}".format( + _pid, + receiver.queue_size, + total)) + elif iteration >= 5: + iteration = 0 + print("{}: total received {}, last sn={}, last offset={}".format( + _pid, + total, + batch[-1].sequence_number, + batch[-1].offset.value)) + print("{}: total received {}".format( + _pid, + total)) except Exception as e: print("Partition {} receiver failed: {}".format(_pid, e)) raise @pytest.mark.liveTest -def test_long_running_receive_async(connection_str): +@pytest.mark.asyncio +async def test_long_running_receive_async(connection_str): parser = argparse.ArgumentParser() parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) parser.add_argument("--consumer", help="Consumer group name", default="$default") @@ -98,11 +101,11 @@ def test_long_running_receive_async(connection_str): loop = asyncio.get_event_loop() args, _ = parser.parse_known_args() if args.conn_str: - client = EventHubClientAsync.from_connection_string( + client = EventHubClient.from_connection_string( args.conn_str, - eventhub=args.eventhub, auth_timeout=240, debug=False) + eventhub=args.eventhub, auth_timeout=240, network_tracing=False) elif args.address: - client = EventHubClientAsync( + client = EventHubClient( args.address, auth_timeout=240, username=args.sas_policy, @@ -116,22 +119,21 @@ def 
test_long_running_receive_async(connection_str): try: if not args.partitions: - partitions = loop.run_until_complete(get_partitions(client)) + partitions = await client.get_partition_ids() else: partitions = args.partitions.split(",") pumps = [] for pid in partitions: - receiver = client.add_async_receiver( - consumer_group=args.consumer, - partition=pid, - offset=Offset(args.offset), - prefetch=50) + receiver = client.create_receiver( + partition_id=pid, + event_position=EventPosition(args.offset), + prefetch=50, + loop=loop) pumps.append(pump(pid, receiver, args, args.duration)) - loop.run_until_complete(client.run_async()) - loop.run_until_complete(asyncio.gather(*pumps)) + await asyncio.gather(*pumps) finally: - loop.run_until_complete(client.stop_async()) + pass if __name__ == '__main__': - test_long_running_receive_async(os.environ.get('EVENT_HUB_CONNECTION_STR')) + asyncio.run(test_long_running_receive_async(os.environ.get('EVENT_HUB_CONNECTION_STR'))) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py index 56832f87a87d..dd87e5324558 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py @@ -13,7 +13,8 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventHubClientAsync, EventData +from azure.eventhub import EventData +from azure.eventhub.aio import EventHubClient def get_logger(filename, level=logging.INFO): @@ -47,7 +48,7 @@ def check_send_successful(outcome, condition): async def get_partitions(args): - eh_data = await args.get_eventhub_info_async() + eh_data = await args.get_properties() return eh_data["partition_ids"] @@ -65,16 +66,17 @@ def data_generator(): logger.info("{}: Sending single messages".format(pid)) try: - while time.time() < deadline: - if args.batch > 1: - data 
= EventData(batch=data_generator()) - else: - data = EventData(body=b"D" * args.payload) - sender.transfer(data, callback=check_send_successful) - total += args.batch - if total % 100 == 0: - await sender.wait_async() - logger.info("{}: Send total {}".format(pid, total)) + async with sender: + while time.time() < deadline: + if args.batch > 1: + data = EventData(body=data_generator()) + else: + data = EventData(body=b"D" * args.payload) + sender.queue_message(data, callback=check_send_successful) + total += args.batch + if total % 100 == 0: + await sender.send_pending_messages() + logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) raise @@ -82,7 +84,8 @@ def data_generator(): @pytest.mark.liveTest -def test_long_running_partition_send_async(connection_str): +@pytest.mark.asyncio +async def test_long_running_partition_send_async(connection_str): parser = argparse.ArgumentParser() parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) parser.add_argument("--payload", help="payload size", type=int, default=1024) @@ -99,11 +102,11 @@ def test_long_running_partition_send_async(connection_str): args, _ = parser.parse_known_args() if args.conn_str: - client = EventHubClientAsync.from_connection_string( + client = EventHubClient.from_connection_string( args.conn_str, - eventhub=args.eventhub, debug=True) + eventhub=args.eventhub, network_tracing=True) elif args.address: - client = EventHubClientAsync( + client = EventHubClient( args.address, username=args.sas_policy, password=args.sas_key, @@ -117,7 +120,7 @@ def test_long_running_partition_send_async(connection_str): try: if not args.partitions: - partitions = loop.run_until_complete(get_partitions(client)) + partitions = await client.get_partition_ids() else: pid_range = args.partitions.split("-") if len(pid_range) > 1: @@ -126,16 +129,15 @@ def test_long_running_partition_send_async(connection_str): 
partitions = args.partitions.split(",") pumps = [] for pid in partitions: - sender = client.add_async_sender(partition=pid, send_timeout=0, keep_alive=False) + sender = client.create_sender(partition_id=pid, send_timeout=0) pumps.append(pump(pid, sender, args, args.duration)) - loop.run_until_complete(client.run_async()) - results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) + results = await asyncio.gather(*pumps, return_exceptions=True) assert not results except Exception as e: logger.error("Sender failed: {}".format(e)) finally: - logger.info("Shutting down sender") - loop.run_until_complete(client.stop_async()) + pass + if __name__ == '__main__': - test_long_running_partition_send_async(os.environ.get('EVENT_HUB_CONNECTION_STR')) + asyncio.run(test_long_running_partition_send_async(os.environ.get('EVENT_HUB_CONNECTION_STR'))) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py index 4b2e8b0a367b..4e904d19453f 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py @@ -10,102 +10,104 @@ import time import sys -from azure import eventhub from azure.eventhub import ( - EventHubClientAsync, EventData, - Offset, - EventHubError) - + EventPosition, + EventHubError, + ConnectError, + AuthenticationError, + EventDataError, +) +from azure.eventhub.aio import EventHubClient @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_with_invalid_hostname_async(invalid_hostname, connstr_receivers): _, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True) - sender = client.add_async_sender() - with pytest.raises(EventHubError): - await client.run_async() + client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=True) + sender = client.create_sender() + with 
pytest.raises(AuthenticationError): + await sender._open() @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_invalid_hostname_async(invalid_hostname): - client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True) - sender = client.add_async_receiver("$default", "0") - with pytest.raises(EventHubError): - await client.run_async() + client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=True) + sender = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + await sender._open() @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_with_invalid_key_async(invalid_key, connstr_receivers): _, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(invalid_key, debug=False) - sender = client.add_async_sender() - with pytest.raises(EventHubError): - await client.run_async() + client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) + sender = client.create_sender() + with pytest.raises(AuthenticationError): + await sender._open() @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_invalid_key_async(invalid_key): - client = EventHubClientAsync.from_connection_string(invalid_key, debug=True) - sender = client.add_async_receiver("$default", "0") - with pytest.raises(EventHubError): - await client.run_async() + client = EventHubClient.from_connection_string(invalid_key, network_tracing=True) + sender = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + await sender._open() @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_with_invalid_policy_async(invalid_policy, connstr_receivers): _, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(invalid_policy, debug=False) - sender = client.add_async_sender() - with pytest.raises(EventHubError): - await client.run_async() + client = 
EventHubClient.from_connection_string(invalid_policy, network_tracing=False) + sender = client.create_sender() + with pytest.raises(AuthenticationError): + await sender._open() @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_invalid_policy_async(invalid_policy): - client = EventHubClientAsync.from_connection_string(invalid_policy, debug=True) - sender = client.add_async_receiver("$default", "0") - with pytest.raises(EventHubError): - await client.run_async() + client = EventHubClient.from_connection_string(invalid_policy, network_tracing=True) + sender = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + await sender._open() @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_partition_key_with_partition_async(connection_str): - client = EventHubClientAsync.from_connection_string(connection_str, debug=True) - sender = client.add_async_sender(partition="1") + pytest.skip("Skipped tentatively. Confirm whether to throw ValueError or just warn users") + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + sender = client.create_sender(partition_id="1") try: - await client.run_async() data = EventData(b"Data") data.partition_key = b"PKey" with pytest.raises(ValueError): await sender.send(data) finally: - await client.stop_async() + await sender.close() @pytest.mark.liveTest @pytest.mark.asyncio async def test_non_existing_entity_sender_async(connection_str): - client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", debug=False) - sender = client.add_async_sender(partition="1") - with pytest.raises(EventHubError): - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + sender = client.create_sender(partition_id="1") + with pytest.raises(AuthenticationError): + await sender._open() @pytest.mark.liveTest @pytest.mark.asyncio async def 
test_non_existing_entity_receiver_async(connection_str): - client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", debug=False) - receiver = client.add_async_receiver("$default", "0") - with pytest.raises(EventHubError): - await client.run_async() + client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + receiver = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + await receiver._open() @pytest.mark.liveTest @@ -113,14 +115,13 @@ async def test_non_existing_entity_receiver_async(connection_str): async def test_receive_from_invalid_partitions_async(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: - client = EventHubClientAsync.from_connection_string(connection_str, debug=True) - receiver = client.add_async_receiver("$default", p) + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + receiver = client.create_receiver(partition_id=p) try: - with pytest.raises(EventHubError): - await client.run_async() + with pytest.raises(ConnectError): await receiver.receive(timeout=10) finally: - await client.stop_async() + await receiver.close() @pytest.mark.liveTest @@ -128,13 +129,13 @@ async def test_receive_from_invalid_partitions_async(connection_str): async def test_send_to_invalid_partitions_async(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender(partition=p) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id=p) try: - with pytest.raises(EventHubError): - await client.run_async() + with pytest.raises(ConnectError): + await sender._open() finally: - await client.stop_async() + await sender.close() @pytest.mark.liveTest @@ -142,63 +143,59 @@ async def 
test_send_to_invalid_partitions_async(connection_str): async def test_send_too_large_message_async(connection_str): if sys.platform.startswith('darwin'): pytest.skip("Skipping on OSX - open issue regarding message size") - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender() try: - await client.run_async() - data = EventData(b"A" * 300000) - with pytest.raises(EventHubError): + data = EventData(b"A" * 1100000) + with pytest.raises(EventDataError): await sender.send(data) finally: - await client.stop_async() + await sender.close() @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_null_body_async(connection_str): - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) - sender = client.add_async_sender() + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender() try: - await client.run_async() with pytest.raises(ValueError): data = EventData(None) await sender.send(data) finally: - await client.stop_async() + await sender.close() async def pump(receiver): - messages = 0 - count = 0 - batch = await receiver.receive(timeout=10) - while batch and count <= 5: - count += 1 - messages += len(batch) + async with receiver: + messages = 0 + count = 0 batch = await receiver.receive(timeout=10) - return messages + while batch and count <= 5: + count += 1 + messages += len(batch) + batch = await receiver.receive(timeout=10) + return messages @pytest.mark.liveTest @pytest.mark.asyncio async def test_max_receivers_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClientAsync.from_connection_string(connection_str, debug=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) receivers = [] for i in range(6): - 
receivers.append(client.add_async_receiver("$default", "0", prefetch=1000, offset=Offset('@latest'))) - try: - await client.run_async() - outputs = await asyncio.gather( - pump(receivers[0]), - pump(receivers[1]), - pump(receivers[2]), - pump(receivers[3]), - pump(receivers[4]), - pump(receivers[5]), - return_exceptions=True) - print(outputs) - failed = [o for o in outputs if isinstance(o, EventHubError)] - assert len(failed) == 1 - print(failed[0].message) - finally: - await client.stop_async() + receivers.append(client.create_receiver(partition_id="0", prefetch=1000, event_position=EventPosition('@latest'))) + + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1]), + pump(receivers[2]), + pump(receivers[3]), + pump(receivers[4]), + pump(receivers[5]), + return_exceptions=True) + print(outputs) + failed = [o for o in outputs if isinstance(o, EventHubError)] + assert len(failed) == 1 + print(failed[0].message) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py new file mode 100644 index 000000000000..20641033e5bb --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py @@ -0,0 +1,45 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import pytest +from azure.eventhub import SharedKeyCredentials +from azure.eventhub.aio import EventHubClient + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_get_properties(live_eventhub): + client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], + SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + ) + properties = await client.get_properties() + assert properties['path'] == live_eventhub['event_hub'] and properties['partition_ids'] == ['0', '1'] + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_get_partition_ids(live_eventhub): + client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], + SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + ) + partition_ids = await client.get_partition_ids() + assert partition_ids == ['0', '1'] + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_get_partition_properties(live_eventhub): + client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], + SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + ) + properties = await client.get_partition_properties('0') + assert properties['event_hub_path'] == live_eventhub['event_hub'] \ + and properties['id'] == '0' \ + and 'beginning_sequence_number' in properties \ + and 'last_enqueued_sequence_number' in properties \ + and 'last_enqueued_offset' in properties \ + and 'last_enqueued_time_utc' in properties \ + and 'is_empty' in properties diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 1be11107dae0..18db98649264 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -9,7 +9,7 @@ import pytest import time -from 
azure.eventhub import EventData, EventPosition, EventHubError +from azure.eventhub import EventData, EventPosition, EventHubError, TransportType from azure.eventhub.aio import EventHubClient @@ -17,8 +17,8 @@ @pytest.mark.asyncio async def test_receive_end_of_stream_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -33,8 +33,8 @@ async def test_receive_end_of_stream_async(connstr_senders): @pytest.mark.asyncio async def test_receive_with_offset_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -44,7 +44,7 @@ async def test_receive_with_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.create_receiver("$default", "0", offset=offset) + offset_receiver = client.create_receiver(partition_id="0", event_position=offset) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -57,8 +57,8 @@ async def test_receive_with_offset_async(connstr_senders): @pytest.mark.asyncio async def test_receive_with_inclusive_offset_async(connstr_senders): connection_str, senders 
= connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -68,7 +68,7 @@ async def test_receive_with_inclusive_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset.value, inclusive=True)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -78,8 +78,8 @@ async def test_receive_with_inclusive_offset_async(connstr_senders): @pytest.mark.asyncio async def test_receive_with_datetime_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -88,7 +88,7 @@ async def test_receive_with_datetime_async(connstr_senders): assert len(received) == 1 offset = received[0].enqueued_time - offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) 
assert len(received) == 0 @@ -101,9 +101,10 @@ async def test_receive_with_datetime_async(connstr_senders): @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_sequence_no_async(connstr_senders): + # TODO: sampe problem as the sync version connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -112,7 +113,7 @@ async def test_receive_with_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -126,8 +127,8 @@ async def test_receive_with_sequence_no_async(connstr_senders): @pytest.mark.asyncio async def test_receive_with_inclusive_sequence_no_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -136,7 +137,7 @@ async def test_receive_with_inclusive_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number 
- offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset, inclusive=True)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset, inclusive=True)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -146,8 +147,8 @@ async def test_receive_with_inclusive_sequence_no_async(connstr_senders): @pytest.mark.asyncio async def test_receive_batch_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -174,19 +175,19 @@ async def pump(receiver, sleep=None): @pytest.mark.liveTest @pytest.mark.asyncio -async def test_epoch_receiver_async(connstr_senders): +async def test_exclusive_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receivers = [] - for epoch in [10, 20]: - receivers.append(client.create_epoch_receiver("$default", "0", epoch, prefetch=5)) + for exclusive_receiver_priority in [10, 20]: + receivers.append(client.create_receiver(partition_id="0", exclusive_receiver_priority=exclusive_receiver_priority, prefetch=5)) outputs = await asyncio.gather( pump(receivers[0]), pump(receivers[1]), return_exceptions=True) - assert isinstance(outputs[0], EventHubError) + assert isinstance(outputs[0], EventHubError) # TODO; it's 
LinkDetach error assert outputs[1] == 1 @@ -196,14 +197,14 @@ async def test_multiple_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClient.from_connection_string(connection_str, debug=True) - partitions = await client.get_eventhub_information() + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + partitions = await client.get_properties() assert partitions["partition_ids"] == ["0", "1"] receivers = [] for i in range(2): - receivers.append(client.create_receiver("$default", "0", prefetch=10)) + receivers.append(client.create_receiver(partition_id="0", prefetch=10)) try: - more_partitions = await client.get_eventhub_information() + more_partitions = await client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] outputs = await asyncio.gather( pump(receivers[0]), @@ -218,14 +219,14 @@ async def test_multiple_receiver_async(connstr_senders): @pytest.mark.liveTest @pytest.mark.asyncio -async def test_epoch_receiver_after_non_epoch_receiver_async(connstr_senders): +async def test_exclusive_receiver_after_non_exclusive_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receivers = [] - receivers.append(client.create_receiver("$default", "0", prefetch=10)) - receivers.append(client.create_epoch_receiver("$default", "0", 15, prefetch=10)) + receivers.append(client.create_receiver(partition_id="0", prefetch=10)) + receivers.append(client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10)) try: outputs = await asyncio.gather( pump(receivers[0]), @@ -240,14 +241,14 @@ async def 
test_epoch_receiver_after_non_epoch_receiver_async(connstr_senders): @pytest.mark.liveTest @pytest.mark.asyncio -async def test_non_epoch_receiver_after_epoch_receiver_async(connstr_senders): +async def test_non_exclusive_receiver_after_exclusive_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receivers = [] - receivers.append(client.create_epoch_receiver("$default", "0", 15, prefetch=10)) - receivers.append(client.create_receiver("$default", "0", prefetch=10)) + receivers.append(client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10)) + receivers.append(client.create_receiver(partition_id="0", prefetch=10)) try: outputs = await asyncio.gather( pump(receivers[0]), @@ -263,7 +264,6 @@ async def test_non_epoch_receiver_after_epoch_receiver_async(connstr_senders): @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_batch_with_app_prop_async(connstr_senders): - #pytest.skip("Waiting on uAMQP release") connection_str, senders = connstr_senders app_prop_key = "raw_prop" app_prop_value = "raw_value" @@ -279,13 +279,13 @@ def batched(): ed.application_properties = app_prop yield ed - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 - senders[0].send_batch(batched()) + senders[0].send(batched()) await asyncio.sleep(1) @@ -296,3 +296,27 @@ def batched(): assert 
list(message.body)[0] == "Event Data {}".format(index).encode('utf-8') assert (app_prop_key.encode('utf-8') in message.application_properties) \ and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_over_websocket_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) + receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) + + event_list = [] + for i in range(20): + event_list.append(EventData("Event Number {}".format(i))) + + async with receiver: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + + with senders[0]: + senders[0].send(event_list) + + time.sleep(1) + + received = await receiver.receive(max_batch_size=50, timeout=5) + assert len(received) == 20 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index 9fafc0dc0069..ebbace8c8a05 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -9,43 +9,44 @@ import asyncio import pytest -from azure import eventhub from azure.eventhub import ( - EventHubClientAsync, EventData, - Offset, + EventPosition, EventHubError) +from azure.eventhub.aio import EventHubClient @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_with_long_interval_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=True) - sender = client.add_async_sender() + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + sender = client.create_sender() try: - 
await client.run_async() await sender.send(EventData(b"A single event")) - for _ in range(2): - await asyncio.sleep(300) + for _ in range(1): + #await asyncio.sleep(300) + sender._handler._connection._conn.destroy() await sender.send(EventData(b"A single event")) finally: - await client.stop_async() + await sender.close() received = [] for r in receivers: - received.extend(r.receive(timeout=1)) - assert len(received) == 3 + r._handler._connection._conn.destroy() + received.extend(r.receive(timeout=1)) + assert len(received) == 2 assert list(received[0].body)[0] == b"A single event" def pump(receiver): messages = [] - batch = receiver.receive(timeout=1) - messages.extend(batch) - while batch: + with receiver: batch = receiver.receive(timeout=1) messages.extend(batch) + while batch: + batch = receiver.receive(timeout=1) + messages.extend(batch) return messages @@ -53,10 +54,9 @@ def pump(receiver): @pytest.mark.asyncio async def test_send_with_forced_conn_close_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClientAsync.from_connection_string(connection_str, debug=True) - sender = client.add_async_sender() + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + sender = client.create_sender() try: - await client.run_async() await sender.send(EventData(b"A single event")) sender._handler._message_sender.destroy() await asyncio.sleep(300) @@ -67,28 +67,10 @@ async def test_send_with_forced_conn_close_async(connstr_receivers): await sender.send(EventData(b"A single event")) await sender.send(EventData(b"A single event")) finally: - await client.stop_async() + await sender.close() received = [] for r in receivers: received.extend(pump(r)) assert len(received) == 5 assert list(received[0].body)[0] == b"A single event" - - -# def test_send_with_forced_link_detach(connstr_receivers): -# connection_str, receivers = connstr_receivers -# client = EventHubClient.from_connection_string(connection_str, 
debug=True) -# sender = client.add_sender() -# size = 20 * 1024 -# try: -# client.run() -# for i in range(1000): -# sender.transfer(EventData([b"A"*size, b"B"*size, b"C"*size, b"D"*size, b"A"*size, b"B"*size, b"C"*size, b"D"*size, b"A"*size, b"B"*size, b"C"*size, b"D"*size])) -# sender.wait() -# finally: -# client.stop() - -# received = [] -# for r in receivers: -# received.extend(r.receive(timeout=10)) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py index b17dad9cae2c..9883be044345 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py @@ -11,7 +11,7 @@ import time import json -from azure.eventhub import EventData +from azure.eventhub import EventData, TransportType from azure.eventhub.aio import EventHubClient @@ -19,34 +19,35 @@ @pytest.mark.asyncio async def test_send_with_partition_key_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() - data_val = 0 - for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]: - partition_key = b"test_partition_" + partition - for i in range(50): - data = EventData(str(data_val)) - data.partition_key = partition_key - data_val += 1 - await sender.send(data) + async with sender: + data_val = 0 + for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]: + partition_key = b"test_partition_" + partition + for i in range(50): + data = EventData(str(data_val)) + # data.partition_key = partition_key + data_val += 1 + await sender.send(data, batching_label=partition_key) found_partition_keys = {} for index, partition in enumerate(receivers): received = partition.receive(timeout=5) for message in received: try: - existing = 
found_partition_keys[message.partition_key] + existing = found_partition_keys[message._batching_label] assert existing == index except KeyError: - found_partition_keys[message.partition_key] = index + found_partition_keys[message._batching_label] = index @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_and_receive_zero_length_body_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() async with sender: await sender.send(EventData("")) @@ -63,7 +64,7 @@ async def test_send_and_receive_zero_length_body_async(connstr_receivers): @pytest.mark.asyncio async def test_send_single_event_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() async with sender: await sender.send(EventData(b"A single event")) @@ -80,14 +81,15 @@ async def test_send_single_event_async(connstr_receivers): @pytest.mark.asyncio async def test_send_batch_async(connstr_receivers): connection_str, receivers = connstr_receivers + def batched(): for i in range(10): yield EventData("Event number {}".format(i)) - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() async with sender: - await sender.send_batch(batched()) + await sender.send(batched()) time.sleep(1) received = [] @@ -103,8 +105,8 @@ def batched(): @pytest.mark.asyncio async def test_send_partition_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = 
client.create_sender(partition="1") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id="1") async with sender: await sender.send(EventData(b"Data")) @@ -118,8 +120,8 @@ async def test_send_partition_async(connstr_receivers): @pytest.mark.asyncio async def test_send_non_ascii_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.create_sender(partition="0") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id="0") async with sender: await sender.send(EventData("é,è,à,ù,â,ê,î,ô,û")) await sender.send(EventData(json.dumps({"foo": "漢字"}))) @@ -139,10 +141,10 @@ def batched(): for i in range(10): yield EventData("Event number {}".format(i)) - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.create_sender(partition="1") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id="1") async with sender: - await sender.send_batch(batched()) + await sender.send(batched()) partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 0 @@ -154,7 +156,7 @@ def batched(): @pytest.mark.asyncio async def test_send_array_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() async with sender: await sender.send(EventData([b"A", b"B", b"C"])) @@ -171,9 +173,9 @@ async def test_send_array_async(connstr_receivers): @pytest.mark.asyncio async def test_send_multiple_clients_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = 
EventHubClient.from_connection_string(connection_str, debug=False) - sender_0 = client.create_sender(partition="0") - sender_1 = client.create_sender(partition="1") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender_0 = client.create_sender(partition_id="0") + sender_1 = client.create_sender(partition_id="1") async with sender_0 and sender_1: await sender_0.send(EventData(b"Message 0")) await sender_1.send(EventData(b"Message 1")) @@ -187,7 +189,6 @@ async def test_send_multiple_clients_async(connstr_receivers): @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_batch_with_app_prop_async(connstr_receivers): - # pytest.skip("Waiting on uAMQP release") connection_str, receivers = connstr_receivers app_prop_key = "raw_prop" app_prop_value = "raw_value" @@ -197,16 +198,16 @@ def batched(): for i in range(10): ed = EventData("Event number {}".format(i)) ed.application_properties = app_prop - yield "Event number {}".format(i) + yield ed for i in range(10, 20): ed = EventData("Event number {}".format(i)) ed.application_properties = app_prop - yield "Event number {}".format(i) + yield ed - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() async with sender: - await sender.send_batch(batched()) + await sender.send(batched()) time.sleep(1) @@ -219,3 +220,25 @@ def batched(): assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8') assert (app_prop_key.encode('utf-8') in message.application_properties) \ and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_send_over_websocket_async(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClient.from_connection_string(connection_str, 
transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) + sender = client.create_sender() + + event_list = [] + for i in range(20): + event_list.append(EventData("Event Number {}".format(i))) + + async with sender: + await sender.send(event_list) + + time.sleep(1) + received = [] + for r in receivers: + received.extend(r.receive(timeout=3)) + + assert len(received) == 20 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index ce3db34940e8..82add37ef868 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -8,19 +8,19 @@ import pytest import time -from azure import eventhub -from azure.eventhub import EventData, EventHubClient, Offset +from azure.eventhub import EventData, EventHubClient @pytest.mark.liveTest def test_iothub_receive_sync(iot_connection_str, device_id): - client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) - receiver = client.add_receiver("$default", "0", operation='/messages/events') + pytest.skip("current code will cause ErrorCodes.LinkRedirect") + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + receiver = client.create_receiver(partition_id="0", operation='/messages/events') + receiver._open() try: - client.run() - partitions = client.get_eventhub_info() + partitions = client.get_properties() assert partitions["partition_ids"] == ["0", "1", "2", "3"] received = receiver.receive(timeout=5) assert len(received) == 0 finally: - client.stop() \ No newline at end of file + receiver.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py index 96d4adaa4cf1..df7a184a227e 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py @@ -11,19 +11,16 @@ 
from uamqp.message import MessageProperties -from azure import eventhub from azure.eventhub import EventData, EventHubClient @pytest.mark.liveTest def test_iothub_send_single_event(iot_connection_str, device_id): - client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) - sender = client.add_sender(operation='/messages/devicebound') + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + sender = client.create_sender(operation='/messages/devicebound') try: - client.run() - outcome = sender.send(EventData(b"A single event", to_device=device_id)) - assert outcome.value == 0 + sender.send(EventData(b"A single event", to_device=device_id)) except: raise finally: - client.stop() + sender.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py index 1afbd9c05103..7ae53a0b2496 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py @@ -18,7 +18,7 @@ from logging.handlers import RotatingFileHandler -from azure.eventhub import Offset +from azure.eventhub import EventPosition from azure.eventhub import EventHubClient def get_logger(filename, level=logging.INFO): @@ -47,7 +47,7 @@ def get_logger(filename, level=logging.INFO): def get_partitions(args): - eh_data = args.get_eventhub_info() + eh_data = args.get_properties() return eh_data["partition_ids"] @@ -97,7 +97,7 @@ def test_long_running_receive(connection_str): if args.conn_str: client = EventHubClient.from_connection_string( args.conn_str, - eventhub=args.eventhub, debug=False) + eventhub=args.eventhub, network_tracing=False) elif args.address: client = EventHubClient( args.address, @@ -117,15 +117,14 @@ def test_long_running_receive(connection_str): partitions = args.partitions.split(",") pumps = {} for pid in partitions: - pumps[pid] = client.add_receiver( - 
consumer_group=args.consumer, - partition=pid, - offset=Offset(args.offset), + pumps[pid] = client.create_receiver( + partition_id=pid, + event_position=EventPosition(args.offset), prefetch=50) - client.run() pump(pumps, args.duration) finally: - client.stop() + for pid in partitions: + pumps[pid].close() if __name__ == '__main__': diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py index 31744d8550dd..90c6d0dc3cf9 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py @@ -51,8 +51,7 @@ def check_send_successful(outcome, condition): def main(client, args): - sender = client.add_sender() - client.run() + sender = client.create_sender() deadline = time.time() + args.duration total = 0 @@ -70,16 +69,16 @@ def data_generator(): if args.batch > 1: data = EventData(batch=data_generator()) else: - data = EventData(body=b"D" * args.payload) - sender.transfer(data, callback=check_send_successful) + data = EventData(batch=b"D" * args.payload) + sender.queue_message(data, callback=check_send_successful) total += args.batch if total % 10000 == 0: - sender.wait() - print("Send total {}".format(total)) + sender.send_pending_messages() + print("Send total {}".format(total)) except Exception as err: print("Send failed {}".format(err)) finally: - client.stop() + sender.close() print("Sent total {}".format(total)) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_negative.py b/sdk/eventhub/azure-eventhubs/tests/test_negative.py index 28fd7493ef13..206c5b415002 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_negative.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_negative.py @@ -9,159 +9,156 @@ import time import sys -from azure import eventhub from azure.eventhub import ( EventData, - Offset, + EventPosition, EventHubError, + AuthenticationError, + ConnectError, + EventDataError, EventHubClient) 
@pytest.mark.liveTest def test_send_with_invalid_hostname(invalid_hostname, connstr_receivers): _, receivers = connstr_receivers - client = EventHubClient.from_connection_string(invalid_hostname, debug=False) - sender = client.add_sender() - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) + sender = client.create_sender() + with pytest.raises(AuthenticationError): + sender._open() @pytest.mark.liveTest def test_receive_with_invalid_hostname_sync(invalid_hostname): - client = EventHubClient.from_connection_string(invalid_hostname, debug=True) - receiver = client.add_receiver("$default", "0") - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=True) + receiver = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + receiver._open() @pytest.mark.liveTest def test_send_with_invalid_key(invalid_key, connstr_receivers): _, receivers = connstr_receivers - client = EventHubClient.from_connection_string(invalid_key, debug=False) - sender = client.add_sender() - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) + sender = client.create_sender() + with pytest.raises(AuthenticationError): + sender._open() @pytest.mark.liveTest def test_receive_with_invalid_key_sync(invalid_key): - client = EventHubClient.from_connection_string(invalid_key, debug=True) - receiver = client.add_receiver("$default", "0") - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(invalid_key, network_tracing=True) + receiver = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + receiver._open() @pytest.mark.liveTest def test_send_with_invalid_policy(invalid_policy, connstr_receivers): _, receivers = connstr_receivers - client = 
EventHubClient.from_connection_string(invalid_policy, debug=False) - sender = client.add_sender() - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(invalid_policy, network_tracing=False) + sender = client.create_sender() + with pytest.raises(AuthenticationError): + sender._open() @pytest.mark.liveTest def test_receive_with_invalid_policy_sync(invalid_policy): - client = EventHubClient.from_connection_string(invalid_policy, debug=True) - receiver = client.add_receiver("$default", "0") - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(invalid_policy, network_tracing=True) + receiver = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + receiver._open() @pytest.mark.liveTest def test_send_partition_key_with_partition_sync(connection_str): - client = EventHubClient.from_connection_string(connection_str, debug=True) - sender = client.add_sender(partition="1") + pytest.skip("Skipped tentatively. 
Confirm whether to throw ValueError or just warn users") + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + sender = client.create_sender(partition_id="1") try: - client.run() data = EventData(b"Data") data.partition_key = b"PKey" with pytest.raises(ValueError): sender.send(data) finally: - client.stop() + sender.close() @pytest.mark.liveTest def test_non_existing_entity_sender(connection_str): - client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False) - sender = client.add_sender(partition="1") - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + sender = client.create_sender(partition_id="1") + with pytest.raises(AuthenticationError): + sender._open() @pytest.mark.liveTest def test_non_existing_entity_receiver(connection_str): - client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False) - receiver = client.add_receiver("$default", "0") - with pytest.raises(EventHubError): - client.run() + client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + receiver = client.create_receiver(partition_id="0") + with pytest.raises(AuthenticationError): + receiver._open() @pytest.mark.liveTest def test_receive_from_invalid_partitions_sync(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: - client = EventHubClient.from_connection_string(connection_str, debug=True) - receiver = client.add_receiver("$default", p) + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + receiver = client.create_receiver(partition_id=p) try: - with pytest.raises(EventHubError): - client.run() + with pytest.raises(ConnectError): receiver.receive(timeout=10) finally: - client.stop() + receiver.close() @pytest.mark.liveTest def test_send_to_invalid_partitions(connection_str): partitions 
= ["XYZ", "-1", "1000", "-" ] for p in partitions: - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender(partition=p) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id=p) try: - with pytest.raises(EventHubError): - client.run() + with pytest.raises(ConnectError): + sender._open() finally: - client.stop() + sender.close() @pytest.mark.liveTest def test_send_too_large_message(connection_str): if sys.platform.startswith('darwin'): pytest.skip("Skipping on OSX - open issue regarding message size") - client = EventHubClient.from_connection_string(connection_str, debug=True) - sender = client.add_sender() + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + sender = client.create_sender() try: - client.run() - data = EventData(b"A" * 300000) - with pytest.raises(EventHubError): + data = EventData(b"A" * 1100000) + with pytest.raises(EventDataError): sender.send(data) finally: - client.stop() + sender.close() @pytest.mark.liveTest def test_send_null_body(connection_str): - partitions = ["XYZ", "-1", "1000", "-" ] - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.add_sender() + partitions = ["XYZ", "-1", "1000", "-"] + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender() try: - client.run() with pytest.raises(ValueError): data = EventData(None) sender.send(data) finally: - client.stop() + sender.close() @pytest.mark.liveTest def test_message_body_types(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.add_receiver("$default", "0", offset=Offset('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = 
client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) try: - client.run() - received = receiver.receive(timeout=5) assert len(received) == 0 senders[0].send(EventData(b"Bytes Data")) @@ -207,4 +204,4 @@ def test_message_body_types(connstr_senders): except: raise finally: - client.stop() \ No newline at end of file + receiver.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_properties.py b/sdk/eventhub/azure-eventhubs/tests/test_properties.py new file mode 100644 index 000000000000..b1889bdcf179 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/test_properties.py @@ -0,0 +1,41 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import pytest +from azure.eventhub import EventHubClient, SharedKeyCredentials + + +@pytest.mark.liveTest +def test_get_properties(live_eventhub): + client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], + SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + ) + properties = client.get_properties() + assert properties['path'] == live_eventhub['event_hub'] and properties['partition_ids'] == ['0', '1'] + + +@pytest.mark.liveTest +def test_get_partition_ids(live_eventhub): + client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], + SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + ) + partition_ids = client.get_partition_ids() + assert partition_ids == ['0', '1'] + + +@pytest.mark.liveTest +def test_get_partition_properties(live_eventhub): + client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], + SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + ) + properties = 
client.get_partition_properties('0') + assert properties['event_hub_path'] == live_eventhub['event_hub'] \ + and properties['id'] == '0' \ + and 'beginning_sequence_number' in properties \ + and 'last_enqueued_sequence_number' in properties \ + and 'last_enqueued_offset' in properties \ + and 'last_enqueued_time_utc' in properties \ + and 'is_empty' in properties diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index 51fbb3a6079a..38944e553bdf 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py @@ -9,13 +9,13 @@ import time import datetime -from azure.eventhub import EventData, EventHubClient, EventPosition +from azure.eventhub import EventData, EventHubClient, EventPosition, TransportType # def test_receive_without_events(connstr_senders): # connection_str, senders = connstr_senders -# client = EventHubClient.from_connection_string(connection_str, debug=True) -# receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) +# client = EventHubClient.from_connection_string(connection_str, network_tracing=True) +# receiver = client.create_receiver("$default", "0", event_position=EventPosition('@latest')) # finish = datetime.datetime.now() + datetime.timedelta(seconds=240) # count = 0 # try: @@ -36,8 +36,8 @@ @pytest.mark.liveTest def test_receive_end_of_stream(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -52,12 +52,12 @@ def test_receive_end_of_stream(connstr_senders): 
@pytest.mark.liveTest def test_receive_with_offset_sync(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - partitions = client.get_eventhub_information() + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + partitions = client.get_properties() assert partitions["partition_ids"] == ["0", "1"] - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) with receiver: - more_partitions = client.get_eventhub_information() + more_partitions = client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] received = receiver.receive(timeout=5) @@ -70,7 +70,7 @@ def test_receive_with_offset_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_receiver("$default", "0", offset=offset) + offset_receiver = client.create_receiver(partition_id="0", event_position=offset) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -82,8 +82,8 @@ def test_receive_with_offset_sync(connstr_senders): @pytest.mark.liveTest def test_receive_with_inclusive_offset(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) @@ -97,7 +97,7 @@ def test_receive_with_inclusive_offset(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = 
client.create_receiver("$default", "0", offset=EventPosition(offset.value, inclusive=True)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -106,12 +106,12 @@ def test_receive_with_inclusive_offset(connstr_senders): @pytest.mark.liveTest def test_receive_with_datetime_sync(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - partitions = client.get_eventhub_information() + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + partitions = client.get_properties() assert partitions["partition_ids"] == ["0", "1"] - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) with receiver: - more_partitions = client.get_eventhub_information() + more_partitions = client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] received = receiver.receive(timeout=5) assert len(received) == 0 @@ -123,7 +123,7 @@ def test_receive_with_datetime_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -135,7 +135,7 @@ def test_receive_with_datetime_sync(connstr_senders): @pytest.mark.liveTest def test_receive_with_custom_datetime_sync(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = 
EventHubClient.from_connection_string(connection_str, network_tracing=False) for i in range(5): senders[0].send(EventData(b"Message before timestamp")) time.sleep(60) @@ -145,7 +145,7 @@ def test_receive_with_custom_datetime_sync(connstr_senders): for i in range(5): senders[0].send(EventData(b"Message after timestamp")) - receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) with receiver: all_received = [] received = receiver.receive(timeout=1) @@ -162,8 +162,9 @@ def test_receive_with_custom_datetime_sync(connstr_senders): @pytest.mark.liveTest def test_receive_with_sequence_no(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -173,7 +174,7 @@ def test_receive_with_sequence_no(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset, False)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -182,12 +183,11 @@ def test_receive_with_sequence_no(connstr_senders): received = offset_receiver.receive(timeout=5) assert len(received) == 1 - @pytest.mark.liveTest def test_receive_with_inclusive_sequence_no(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = 
client.create_receiver("$default", "0", offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -195,7 +195,7 @@ def test_receive_with_inclusive_sequence_no(connstr_senders): received = receiver.receive(timeout=5) assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_receiver("$default", "0", offset=EventPosition(offset, inclusive=True)) + offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset, inclusive=True)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -204,8 +204,8 @@ def test_receive_with_inclusive_sequence_no(connstr_senders): @pytest.mark.liveTest def test_receive_batch(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -233,13 +233,13 @@ def batched(): ed.application_properties = batch_app_prop yield ed - client = EventHubClient.from_connection_string(connection_str, debug=False) - receiver = client.create_receiver("$default", "0", prefetch=500, offset=EventPosition('@latest')) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) 
assert len(received) == 0 - senders[0].send_batch(batched()) + senders[0].send(batched()) time.sleep(1) @@ -251,3 +251,25 @@ def batched(): assert (app_prop_key.encode('utf-8') in message.application_properties) \ and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) + +@pytest.mark.liveTest +def test_receive_over_websocket_sync(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) + receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) + + event_list = [] + for i in range(20): + event_list.append(EventData("Event Number {}".format(i))) + + with receiver: + received = receiver.receive(timeout=5) + assert len(received) == 0 + + with senders[0] as sender: + sender.send(event_list) + + time.sleep(1) + + received = receiver.receive(max_batch_size=50, timeout=5) + assert len(received) == 20 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index d44fb77106bb..b24cca267c82 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -8,10 +8,9 @@ import time import pytest -from azure import eventhub from azure.eventhub import ( EventData, - Offset, + EventPosition, EventHubError, EventHubClient) @@ -19,64 +18,40 @@ @pytest.mark.liveTest def test_send_with_long_interval_sync(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=True) - sender = client.add_sender() - try: - client.run() + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + sender = client.create_sender() + with sender: sender.send(EventData(b"A single event")) - for _ in range(2): + for _ in range(1): 
time.sleep(300) sender.send(EventData(b"A single event")) - finally: - client.stop() received = [] for r in receivers: received.extend(r.receive(timeout=1)) - assert len(received) == 3 + assert len(received) == 2 assert list(received[0].body)[0] == b"A single event" @pytest.mark.liveTest def test_send_with_forced_conn_close_sync(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=True) - sender = client.add_sender() - try: - client.run() + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + sender = client.create_sender() + with sender: sender.send(EventData(b"A single event")) - sender._handler._message_sender.destroy() + sender._handler._connection._conn.destroy() time.sleep(300) sender.send(EventData(b"A single event")) sender.send(EventData(b"A single event")) - sender._handler._message_sender.destroy() + sender._handler._connection._conn.destroy() time.sleep(300) sender.send(EventData(b"A single event")) sender.send(EventData(b"A single event")) - finally: - client.stop() received = [] for r in receivers: received.extend(r.receive(timeout=1)) assert len(received) == 5 assert list(received[0].body)[0] == b"A single event" - - -# def test_send_with_forced_link_detach(connstr_receivers): -# connection_str, receivers = connstr_receivers -# client = EventHubClient.from_connection_string(connection_str, debug=True) -# sender = client.add_sender() -# size = 20 * 1024 -# try: -# client.run() -# for i in range(1000): -# sender.transfer(EventData([b"A"*size, b"B"*size, b"C"*size, b"D"*size, b"A"*size, b"B"*size, b"C"*size, b"D"*size, b"A"*size, b"B"*size, b"C"*size, b"D"*size])) -# sender.wait() -# finally: -# client.stop() - -# received = [] -# for r in receivers: -# received.extend(r.receive(timeout=10)) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index cdf1f0ebc6d0..3af0cbed2ef2 
100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -10,13 +10,13 @@ import json import sys -from azure.eventhub import EventData, EventHubClient +from azure.eventhub import EventData, EventHubClient, TransportType @pytest.mark.liveTest def test_send_with_partition_key(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: data_val = 0 @@ -24,19 +24,19 @@ def test_send_with_partition_key(connstr_receivers): partition_key = b"test_partition_" + partition for i in range(50): data = EventData(str(data_val)) - data.partition_key = partition_key + #data.partition_key = partition_key data_val += 1 - sender.send(data) + sender.send(data, batching_label=partition_key) found_partition_keys = {} for index, partition in enumerate(receivers): received = partition.receive(timeout=5) for message in received: try: - existing = found_partition_keys[message.partition_key] + existing = found_partition_keys[message._batching_label] assert existing == index except KeyError: - found_partition_keys[message.partition_key] = index + found_partition_keys[message._batching_label] = index @pytest.mark.liveTest @@ -44,7 +44,7 @@ def test_send_and_receive_large_body_size(connstr_receivers): if sys.platform.startswith('darwin'): pytest.skip("Skipping on OSX - open issue regarding message size") connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: payload = 250 * 1024 @@ -61,7 +61,7 @@ def test_send_and_receive_large_body_size(connstr_receivers): @pytest.mark.liveTest def 
test_send_and_receive_zero_length_body(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: sender.send(EventData("")) @@ -77,7 +77,7 @@ def test_send_and_receive_zero_length_body(connstr_receivers): @pytest.mark.liveTest def test_send_single_event(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: sender.send(EventData(b"A single event")) @@ -93,14 +93,15 @@ def test_send_single_event(connstr_receivers): @pytest.mark.liveTest def test_send_batch_sync(connstr_receivers): connection_str, receivers = connstr_receivers + def batched(): for i in range(10): yield EventData("Event number {}".format(i)) - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: - sender.send_batch(batched()) + sender.send(batched()) time.sleep(1) received = [] @@ -115,8 +116,8 @@ def batched(): @pytest.mark.liveTest def test_send_partition(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.create_sender(partition="1") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id="1") with sender: sender.send(EventData(b"Data")) @@ -129,8 +130,8 @@ def test_send_partition(connstr_receivers): @pytest.mark.liveTest def test_send_non_ascii(connstr_receivers): connection_str, receivers = connstr_receivers - client = 
EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.create_sender(partition="0") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id="0") with sender: sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û")) sender.send(EventData(json.dumps({"foo": u"漢字"}))) @@ -144,14 +145,15 @@ def test_send_non_ascii(connstr_receivers): @pytest.mark.liveTest def test_send_partition_batch(connstr_receivers): connection_str, receivers = connstr_receivers + def batched(): for i in range(10): yield EventData("Event number {}".format(i)) - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender = client.create_sender(partition="1") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender = client.create_sender(partition_id="1") with sender: - sender.send_batch(batched()) + sender.send(batched()) time.sleep(1) partition_0 = receivers[0].receive(timeout=2) @@ -163,7 +165,7 @@ def batched(): @pytest.mark.liveTest def test_send_array_sync(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) sender = client.create_sender() with sender: sender.send(EventData([b"A", b"B", b"C"])) @@ -179,9 +181,9 @@ def test_send_array_sync(connstr_receivers): @pytest.mark.liveTest def test_send_multiple_clients(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, debug=False) - sender_0 = client.create_sender(partition="0") - sender_1 = client.create_sender(partition="1") + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + sender_0 = client.create_sender(partition_id="0") + sender_1 = client.create_sender(partition_id="1") with sender_0: 
sender_0.send(EventData(b"Message 0")) with sender_1: @@ -195,7 +197,6 @@ def test_send_multiple_clients(connstr_receivers): @pytest.mark.liveTest def test_send_batch_with_app_prop_sync(connstr_receivers): - #pytest.skip("Waiting on uAMQP release") connection_str, receivers = connstr_receivers app_prop_key = "raw_prop" app_prop_value = "raw_value" @@ -211,11 +212,13 @@ def batched(): ed.application_properties = app_prop yield ed - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: - sender.send_batch(batched()) + sender.send(batched()) + time.sleep(1) + received = [] for r in receivers: received.extend(r.receive(timeout=3)) @@ -225,3 +228,24 @@ def batched(): assert list(message.body)[0] == "Event number {}".format(index).encode('utf-8') assert (app_prop_key.encode('utf-8') in message.application_properties) \ and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) + + +@pytest.mark.liveTest +def test_send_over_websocket_sync(connstr_receivers): + connection_str, receivers = connstr_receivers + client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) + sender = client.create_sender() + + event_list = [] + for i in range(20): + event_list.append(EventData("Event Number {}".format(i))) + + with sender: + sender.send(event_list) + + time.sleep(1) + received = [] + for r in receivers: + received.extend(r.receive(timeout=3)) + + assert len(received) == 20 From b77d019f884ee8ff3cef524f968ddbdaedbd628b Mon Sep 17 00:00:00 2001 From: Yijun Xie <48257664+YijunXieMS@users.noreply.github.com> Date: Fri, 14 Jun 2019 14:14:51 -0700 Subject: [PATCH 04/54] Error hierarchy, sample code and docstring (#5743) * Recover from fork repo * Packaging update of azure-eventhubs * Fix error message * update 
iterator example * Revert "Packaging update of azure-eventhubs" This reverts commit 56fc4f01126daa85956222ea6f2a992146349bee. * disable autorest auto update * Sender/Receiver -> EventSender/Receiver * Change _batching_label back to partition_key * Remove transfer examples * move async to async folder * Update docstring string, sample codes and test codes (#5793) * catch and process LinkRedirect * Add receiver iterator pytest * small fix of iterator example * add retrieval_time to partition prop * fix open and re-send bugs * small fixes * fix reconnect test case * close iterator when closing receiver * Misc changes for code review fix * client.py type hints * catch KeyboardInterrupt * add next() for 2.7 iterator * raise KeyboardInterrupt instead of exit() --- .../azure/eventhub/__init__.py | 22 +- .../azure/eventhub/aio/__init__.py | 8 +- .../eventhub/aio/event_hubs_client_async.py | 85 +++-- .../azure/eventhub/aio/receiver_async.py | 317 ++++++++-------- .../azure/eventhub/aio/sender_async.py | 314 ++++++++-------- .../azure-eventhubs/azure/eventhub/client.py | 85 ++--- .../azure/eventhub/client_abstract.py | 140 +++++--- .../azure-eventhubs/azure/eventhub/common.py | 118 ++++-- .../azure/eventhub/configuration.py | 2 +- .../azure-eventhubs/azure/eventhub/error.py | 37 +- .../azure/eventhub/receiver.py | 340 +++++++++++------- .../azure-eventhubs/azure/eventhub/sender.py | 311 ++++++++-------- .../eventprocessorhost/eh_partition_pump.py | 23 +- .../eventprocessorhost/partition_manager.py | 25 +- sdk/eventhub/azure-eventhubs/conftest.py | 17 +- .../azure-eventhubs/dev_requirements.txt | 1 + .../async_examples/iterator_receiver_async.py | 49 +++ .../examples/async_examples/recv_async.py | 61 ++++ .../examples/async_examples/send_async.py | 62 ++++ .../test_examples_eventhub_async.py | 155 +++----- .../azure-eventhubs/examples/batch_send.py | 43 +-- .../examples/batch_transfer.py | 62 ---- .../examples/client_secret_auth.py | 48 +++ 
sdk/eventhub/azure-eventhubs/examples/eph.py | 2 +- .../azure-eventhubs/examples/iothub_recv.py | 26 +- .../azure-eventhubs/examples/iothub_send.py | 29 ++ .../examples/iterator_receiver.py | 44 +++ .../azure-eventhubs/examples/proxy.py | 42 ++- sdk/eventhub/azure-eventhubs/examples/recv.py | 51 ++- .../azure-eventhubs/examples/recv_async.py | 62 ---- .../azure-eventhubs/examples/recv_batch.py | 44 ++- .../azure-eventhubs/examples/recv_epoch.py | 46 ++- sdk/eventhub/azure-eventhubs/examples/send.py | 35 +- .../azure-eventhubs/examples/send_async.py | 61 ---- .../examples/test_examples_eventhub.py | 204 +++-------- .../azure-eventhubs/examples/transfer.py | 59 --- .../azure-eventhubs/sdk_packaging.toml | 2 + sdk/eventhub/azure-eventhubs/setup.py | 11 +- .../tests/asynctests/test_auth_async.py | 46 +++ .../asynctests/test_iothub_receive_async.py | 1 + .../tests/asynctests/test_longrunning_eph.py | 21 +- .../test_longrunning_eph_with_context.py | 20 +- .../test_longrunning_receive_async.py | 44 ++- .../asynctests/test_longrunning_send_async.py | 15 +- .../tests/asynctests/test_negative_async.py | 8 +- .../tests/asynctests/test_properties_async.py | 8 +- .../test_receiver_iterator_async.py | 30 ++ .../tests/asynctests/test_reconnect_async.py | 26 +- .../tests/asynctests/test_send_async.py | 6 +- .../azure-eventhubs/tests/test_auth.py | 40 +++ .../azure-eventhubs/tests/test_iothub_send.py | 2 - .../tests/test_longrunning_receive.py | 14 +- .../tests/test_longrunning_send.py | 13 +- .../azure-eventhubs/tests/test_negative.py | 7 +- .../azure-eventhubs/tests/test_properties.py | 8 +- .../tests/test_receiver_iterator.py | 31 ++ .../azure-eventhubs/tests/test_reconnect.py | 25 +- .../azure-eventhubs/tests/test_send.py | 6 +- 58 files changed, 1867 insertions(+), 1547 deletions(-) create mode 100644 sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py create mode 100644 sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py create mode 
100644 sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py delete mode 100644 sdk/eventhub/azure-eventhubs/examples/batch_transfer.py create mode 100644 sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py create mode 100644 sdk/eventhub/azure-eventhubs/examples/iothub_send.py create mode 100644 sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py delete mode 100644 sdk/eventhub/azure-eventhubs/examples/recv_async.py delete mode 100644 sdk/eventhub/azure-eventhubs/examples/send_async.py delete mode 100644 sdk/eventhub/azure-eventhubs/examples/transfer.py create mode 100644 sdk/eventhub/azure-eventhubs/sdk_packaging.toml create mode 100644 sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/test_auth.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index 9766b6816ab8..4de864f577ce 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -3,31 +3,33 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -__version__ = "1.3.1" +__version__ = "2.0.0-preview.1" from azure.eventhub.common import EventData, EventPosition -from azure.eventhub.error import EventHubError, EventDataError, ConnectError, AuthenticationError +from azure.eventhub.error import EventHubError, EventDataError, ConnectError, \ + AuthenticationError, EventDataSendError, ConnectionLostError from azure.eventhub.client import EventHubClient -from azure.eventhub.sender import Sender -from azure.eventhub.receiver import Receiver +from azure.eventhub.sender import EventSender +from azure.eventhub.receiver import EventReceiver from .constants import MessageSendResult from .constants import TransportType -from .common import FIRST_AVAILABLE, NEW_EVENTS_ONLY, SharedKeyCredentials, SASTokenCredentials +from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential __all__ = [ "__version__", "EventData", "EventHubError", "ConnectError", + "ConnectionLostError", "EventDataError", + "EventDataSendError", "AuthenticationError", "EventPosition", "EventHubClient", - "Sender", - "Receiver", + "EventSender", + "EventReceiver", "MessageSendResult", "TransportType", - "FIRST_AVAILABLE", "NEW_EVENTS_ONLY", - "SharedKeyCredentials", - "SASTokenCredentials", + "EventHubSharedKeyCredential", + "EventHubSASTokenCredential", ] diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py index 020392000d1f..88fd0673f4df 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py @@ -1,9 +1,9 @@ from .event_hubs_client_async import EventHubClient -from .receiver_async import Receiver -from .sender_async import Sender +from .receiver_async import EventReceiver +from .sender_async import EventSender __all__ = [ "EventHubClient", - "Receiver", - "Sender" + "EventReceiver", 
+ "EventSender" ] diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py index d88461c98d0c..2a006373fccd 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py @@ -15,13 +15,13 @@ AMQPClientAsync, ) -from azure.eventhub.common import parse_sas_token, SharedKeyCredentials, SASTokenCredentials +from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential from azure.eventhub import ( EventHubError) from ..client_abstract import EventHubClientAbstract -from .sender_async import Sender -from .receiver_async import Receiver +from .sender_async import EventSender +from .receiver_async import EventReceiver log = logging.getLogger(__name__) @@ -56,7 +56,7 @@ def _create_auth(self, username=None, password=None): transport_type = self.config.transport_type auth_timeout = self.config.auth_timeout - if isinstance(self.credential, SharedKeyCredentials): + if isinstance(self.credential, EventHubSharedKeyCredential): username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: @@ -66,7 +66,7 @@ def _create_auth(self, username=None, password=None): self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) - elif isinstance(self.credential, SASTokenCredentials): + elif isinstance(self.credential, EventHubSASTokenCredential): token = self.credential.get_sas_token() try: expiry = int(parse_sas_token(token)['se']) @@ -85,10 +85,14 @@ def _create_auth(self, username=None, password=None): get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) - async def get_properties(self): """ - Get details on the specified EventHub async. 
+ Get properties of the specified EventHub async. + Keys in the details dictionary include: + + -'path' + -'created_at' + -'partition_ids' :rtype: dict """ @@ -117,21 +121,25 @@ async def get_properties(self): await mgmt_client.close_async() async def get_partition_ids(self): + """ + Get partition ids of the specified EventHub async. + + :rtype: list[str] + """ return (await self.get_properties())['partition_ids'] async def get_partition_properties(self, partition): """ - Get information on the specified partition async. + Get properties of the specified partition async. Keys in the details dictionary include: - -'name' - -'type' - -'partition' - -'begin_sequence_number' + -'event_hub_path' + -'id' + -'beginning_sequence_number' -'last_enqueued_sequence_number' -'last_enqueued_offset' -'last_enqueued_time_utc' - -'is_partition_empty' + -'is_empty' :param partition: The target partition id. :type partition: str @@ -168,23 +176,27 @@ async def get_partition_properties(self, partition): await mgmt_client.close_async() def create_receiver( - self, partition_id, consumer_group="$Default", event_position=None, exclusive_receiver_priority=None, operation=None, - prefetch=None, loop=None): + self, partition_id, consumer_group="$Default", event_position=EventPosition.first_available_event(), exclusive_receiver_priority=None, + operation=None, prefetch=None, loop=None): """ - Add an async receiver to the client for a particular consumer group and partition. + Create an async receiver to the client for a particular consumer group and partition. - :param consumer_group: The name of the consumer group. + :param partition_id: The ID of the partition. + :type partition_id: str + :param consumer_group: The name of the consumer group. Default value is `$Default`. :type consumer_group: str - :param partition: The ID of the partition. - :type partition: str :param event_position: The position from which to start receiving. 
:type event_position: ~azure.eventhub.common.EventPosition - :param prefetch: The message prefetch count of the receiver. Default is 300. - :type prefetch: int - :operation: An optional operation to be appended to the hostname in the source URL. + :param exclusive_receiver_priority: The priority of the exclusive receiver. The client will create an exclusive + receiver if exclusive_receiver_priority is set. + :type exclusive_receiver_priority: int + :param operation: An optional operation to be appended to the hostname in the source URL. The value must start with `/` character. :type operation: str - :rtype: ~azure.eventhub.aio.receiver_async.ReceiverAsync + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :param loop: An event loop. If not specified the default event loop will be used. + :rtype: ~azure.eventhub.aio.receiver_async.EventReceiver Example: .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py @@ -200,35 +212,30 @@ def create_receiver( path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) - handler = Receiver( - self, source_url, offset=event_position, exclusive_receiver_priority=exclusive_receiver_priority, + handler = EventReceiver( + self, source_url, event_position=event_position, exclusive_receiver_priority=exclusive_receiver_priority, prefetch=prefetch, loop=loop) return handler def create_sender( self, partition_id=None, operation=None, send_timeout=None, loop=None): """ - Add an async sender to the client to send ~azure.eventhub.common.EventData object + Create an async sender to the client to send ~azure.eventhub.common.EventData object to an EventHub. - :param partition: Optionally specify a particular partition to send to. + :param partition_id: Optionally specify a particular partition to send to. 
If omitted, the events will be distributed to available partitions via round-robin. - :type partition: str - :operation: An optional operation to be appended to the hostname in the target URL. + :type partition_id: str + :param operation: An optional operation to be appended to the hostname in the target URL. The value must start with `/` character. :type operation: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: int - :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during - periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not - be pinged. - :type keep_alive: int - :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. - Default value is `True`. - :type auto_reconnect: bool - :rtype: ~azure.eventhub.aio.sender_async.SenderAsync + :type send_timeout: float + :param loop: An event loop. If not specified the default event loop will be used. + :rtype ~azure.eventhub.aio.sender_async.EventSender + Example: .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py @@ -245,6 +252,6 @@ def create_sender( target = target + operation send_timeout = self.config.send_timeout if send_timeout is None else send_timeout - handler = Sender( + handler = EventSender( self, target, partition=partition_id, send_timeout=send_timeout, loop=loop) return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py index 6614001dc93d..6ee82725d2e2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -7,33 +7,25 @@ import uuid import logging -from uamqp import errors, types +from uamqp import errors, types, compat from uamqp import ReceiveClientAsync, Source from azure.eventhub import EventHubError, EventData -from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, _error_handler +from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, ConnectionLostError, _error_handler log = logging.getLogger(__name__) -class Receiver(object): +class EventReceiver(object): """ - Implements the async API of a Receiver. - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_receiver_instance] - :end-before: [END create_eventhub_client_async_receiver_instance] - :language: python - :dedent: 4 - :caption: Create a new instance of the Async Receiver. + Implements the async API of a EventReceiver. """ timeout = 0 _epoch = b'com.microsoft:epoch' def __init__( # pylint: disable=super-init-not-called - self, client, source, offset=None, prefetch=300, exclusive_receiver_priority=None, + self, client, source, event_position=None, prefetch=300, exclusive_receiver_priority=None, keep_alive=None, auto_reconnect=True, loop=None): """ Instantiate an async receiver. 
@@ -42,18 +34,21 @@ def __init__( # pylint: disable=super-init-not-called :type client: ~azure.eventhub.aio.EventHubClientAsync :param source: The source EventHub from which to receive events. :type source: ~uamqp.address.Source + :param event_position: The position from which to start receiving. + :type event_position: ~azure.eventhub.common.EventPosition :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param epoch: An optional epoch value. - :type epoch: int + :param exclusive_receiver_priority: The priority of the exclusive receiver. It will an exclusive + receiver if exclusive_receiver_priority is set. + :type exclusive_receiver_priority: int :param loop: An event loop. """ self.loop = loop or asyncio.get_event_loop() self.running = False self.client = client self.source = source - self.offset = offset + self.offset = event_position self.messages_iter = None self.prefetch = prefetch self.exclusive_receiver_priority = exclusive_receiver_priority @@ -68,7 +63,7 @@ def __init__( # pylint: disable=super-init-not-called self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset.selector()) + source.set_filter(self.offset._selector()) # pylint: disable=protected-access if exclusive_receiver_priority: self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(exclusive_receiver_priority))} self._handler = ReceiveClientAsync( @@ -81,7 +76,7 @@ def __init__( # pylint: disable=super-init-not-called error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent), + properties=self.client._create_properties(self.client.config.user_agent), # pylint: disable=protected-access loop=self.loop) async def __aenter__(self): @@ -95,7 +90,10 @@ def __aiter__(self): async def __anext__(self): await 
self._open() + max_retries = self.client.config.max_retries + connecting_count = 0 while True: + connecting_count += 1 try: if not self.messages_iter: self.messages_iter = self._handler.receive_messages_iter_async() @@ -103,62 +101,76 @@ async def __anext__(self): event_data = EventData(message=message) self.offset = event_data.offset return event_data - except (errors.TokenExpired, errors.AuthenticationException): - log.info("Receiver disconnected due to token error. Attempting reconnect.") - await self.reconnect() + except errors.AuthenticationException as auth_error: + if connecting_count < max_retries: + log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventReceiver authentication failed. Shutting down.") + error = AuthenticationError(str(auth_error), auth_error) + await self.close(auth_error) + raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - await self.reconnect() + log.info("EventReceiver detached. Attempting reconnect.") + await self._reconnect() else: - log.info("Receiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) + log.info("EventReceiver detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - await self.reconnect() + if connecting_count < max_retries: + log.info("EventReceiver detached. Attempting reconnect.") + await self._reconnect() else: - log.info("Receiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - await self.close(exception=error) + log.info("EventReceiver detached. 
Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + await self.close(error) raise error + except errors.AMQPConnectionError as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver connection lost. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventReceiver connection lost. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + await self.close(error) + raise error + except compat.TimeoutException as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventReceiver timed out. Shutting down.") + await self.close(shutdown) + raise TimeoutError(str(shutdown), shutdown) except StopAsyncIteration: raise - except asyncio.CancelledError: - # TODO: stop self.message_iter - raise except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) await self.close(exception=error) raise error + def _check_closed(self): + if self.error: + raise EventHubError("This receiver has been closed. Please create a new receiver to receive event data.", + self.error) async def _open(self): """ - Open the Receiver using the supplied conneciton. + Open the EventReceiver using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. - :param connection: The underlying client shared connection. - :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_receiver_open] - :end-before: [END eventhub_client_async_receiver_open] - :language: python - :dedent: 4 - :caption: Open the Receiver using the supplied conneciton. 
- """ # pylint: disable=protected-access + self._check_closed() if self.redirected: self.source = self.redirected.address source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset.selector()) + source.set_filter(self.offset._selector()) # pylint: disable=protected-access alt_creds = { "username": self.client._auth_config.get("iot_username"), "password":self.client._auth_config.get("iot_password")} @@ -172,107 +184,93 @@ async def _open(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent), + properties=self.client._create_properties(self.client.config.user_agent), # pylint: disable=protected-access loop=self.loop) - if not self.running: - try: - await self._handler.open_async() - self.running = True - while not await self._handler.client_ready_async(): - await asyncio.sleep(0.05) - except errors.AuthenticationException: - log.info("Receiver failed authentication. Retrying...") - await self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - await self.reconnect() - else: - log.info("Receiver detached. 
Failed to connect") - error = ConnectError(str(shutdown), shutdown) - raise error - except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("Receiver couldn't authenticate (%r).", shutdown) - error = AuthenticationError(str(shutdown)) - raise error - else: - log.info("Receiver connection error (%r).", shutdown) - error = ConnectError(str(shutdown)) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r)", e) - error = EventHubError("Receiver connect failed: {}".format(e)) - raise error + await self._connect() + self.running = True - async def _reconnect(self): # pylint: disable=too-many-statements + async def _connect(self): + connected = await self._build_connection() + if not connected: + await asyncio.sleep(self.reconnect_backoff) + while not await self._build_connection(is_reconnect=True): + await asyncio.sleep(self.reconnect_backoff) + + async def _build_connection(self, is_reconnect=False): # pylint: disable=too-many-statements # pylint: disable=protected-access - alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password":self.client._auth_config.get("iot_password")} - await self._handler.close_async() - source = Source(self.source) - if self.offset is not None: - source.set_filter(self.offset.selector()) - self._handler = ReceiveClientAsync( - source, - auth=self.client.get_auth(**alt_creds), - debug=self.client.config.network_tracing, - prefetch=self.prefetch, - link_properties=self.properties, - timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent), - loop=self.loop) - self.messages_iter = None + if is_reconnect: + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password":self.client._auth_config.get("iot_password")} + await 
self._handler.close_async() + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset._selector()) # pylint: disable=protected-access + self._handler = ReceiveClientAsync( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.config.network_tracing, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client._create_properties(self.client.config.user_agent), # pylint: disable=protected-access + loop=self.loop) + self.messages_iter = None try: await self._handler.open_async() while not await self._handler.client_ready_async(): await asyncio.sleep(0.05) return True except errors.AuthenticationException as shutdown: - log.info("AsyncReceiver disconnected due to token expiry. Shutting down.") - error = AuthenticationError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + if is_reconnect: + log.info("EventReceiver couldn't authenticate. Shutting down. (%r)", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventReceiver couldn't authenticate. Attempting reconnect.") + return False except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") + if shutdown.action.retry: + log.info("EventReceiver detached. Attempting reconnect.") return False - log.info("AsyncReceiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + else: + log.info("EventReceiver detached. 
Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") + if is_reconnect: + log.info("EventReceiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventReceiver detached. Attempting reconnect.") return False - log.info("AsyncReceiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - await self.close(exception=error) - raise error except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("AsyncReceiver couldn't authenticate. Attempting reconnect.") + if is_reconnect: + log.info("EventReceiver connection error (%r). Shutting down.", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventReceiver couldn't authenticate. Attempting reconnect.") return False - log.info("AsyncReceiver connection error (%r). Shutting down.", shutdown) - error = ConnectError(str(shutdown)) - await self.close(exception=error) - raise error except Exception as e: log.info("Unexpected error occurred (%r). 
Shutting down.", e) - error = EventHubError("Receiver reconnect failed: {}".format(e)) + error = EventHubError("EventReceiver reconnect failed: {}".format(e)) await self.close(exception=error) raise error - async def reconnect(self): - """If the Receiver was disconnected from the service with + async def _reconnect(self): + """If the EventReceiver was disconnected from the service with a retryable error - attempt to reconnect.""" - while not await self._reconnect(): - await asyncio.sleep(self.reconnect_backoff) + return await self._build_connection(is_reconnect=True) async def close(self, exception=None): """ @@ -330,6 +328,11 @@ async def receive(self, max_batch_size=None, timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. :type max_batch_size: int + :param timeout: The timeout time in seconds to receive a batch of events + from an Event Hub. Results will be returned after timeout. If combined + with max_batch_size, it will return after either the count of received events + reaches the max_batch_size or the operation has timed out. + :type timeout: int :rtype: list[~azure.eventhub.common.EventData] Example: @@ -338,16 +341,19 @@ async def receive(self, max_batch_size=None, timeout=None): :end-before: [END eventhub_client_async_receive] :language: python :dedent: 4 - :caption: Sends an event data and asynchronously waits - until acknowledgement is received or operation times out. 
+ :caption: Receives events asynchronously """ - if self.error: - raise self.error await self._open() + max_batch_size = min(self.client.config.max_batch_size, self.prefetch) if max_batch_size is None else max_batch_size + timeout = self.client.config.receive_timeout if timeout is None else timeout + data_batch = [] + max_retries = self.client.config.max_retries + connecting_count = 0 while True: + connecting_count += 1 try: timeout_ms = 1000 * timeout if timeout else 0 message_batch = await self._handler.receive_message_batch_async( @@ -358,29 +364,52 @@ async def receive(self, max_batch_size=None, timeout=None): self.offset = event_data.offset data_batch.append(event_data) return data_batch - except (errors.TokenExpired, errors.AuthenticationException): - log.info("AsyncReceiver disconnected due to token error. Attempting reconnect.") - await self.reconnect() + except errors.AuthenticationException as auth_error: + if connecting_count < max_retries: + log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventReceiver authentication failed. Shutting down.") + error = AuthenticationError(str(auth_error), auth_error) + await self.close(auth_error) + raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncReceiver detached. Attempting reconnect.") - await self.reconnect() + log.info("EventReceiver detached. Attempting reconnect.") + await self._reconnect() else: - log.info("AsyncReceiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) + log.info("EventReceiver detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncReceiver detached. 
Attempting reconnect.") - await self.reconnect() + if connecting_count < max_retries: + log.info("EventReceiver detached. Attempting reconnect.") + await self._reconnect() else: - log.info("AsyncReceiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - await self.close(exception=error) + log.info("EventReceiver detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + await self.close(error) raise error + except errors.AMQPConnectionError as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver connection lost. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventReceiver connection lost. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + await self.close(error) + raise error + except compat.TimeoutException as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventReceiver timed out. Shutting down.") + await self.close(shutdown) + raise TimeoutError(str(shutdown), shutdown) except Exception as e: log.info("Unexpected error occurred (%r). 
Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) await self.close(exception=error) - raise error + raise error \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py index e263131ff859..f2d09a2df457 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py @@ -7,29 +7,21 @@ import asyncio import logging -from uamqp import constants, errors +from uamqp import constants, errors, compat from uamqp import SendClientAsync from azure.eventhub import MessageSendResult from azure.eventhub import EventHubError from azure.eventhub.common import EventData, _BatchSendEventData from azure.eventhub.error import EventHubError, ConnectError, \ - AuthenticationError, EventDataError, _error_handler + AuthenticationError, EventDataError, EventDataSendError, ConnectionLostError, _error_handler log = logging.getLogger(__name__) -class Sender(object): +class EventSender(object): """ - Implements the async API of a Sender. - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START create_eventhub_client_async_sender_instance] - :end-before: [END create_eventhub_client_async_sender_instance] - :language: python - :dedent: 4 - :caption: Create a new instance of the Async Sender. + Implements the async API of a EventSender. """ @@ -48,10 +40,10 @@ def __init__( # pylint: disable=super-init-not-called :type partition: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: int + :type send_timeout: float :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during periods of inactivity. The default value is `None`, i.e. 
no keep alive pings. - :type keep_alive: int + :type keep_alive: float :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. Default value is `True`. :type auto_reconnect: bool @@ -68,6 +60,7 @@ def __init__( # pylint: disable=super-init-not-called self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) self.reconnect_backoff = 1 self.name = "EHSender-{}".format(uuid.uuid4()) + self.unsent_events = None self.redirected = None self.error = None if partition: @@ -81,7 +74,7 @@ def __init__( # pylint: disable=super-init-not-called error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent), + properties=self.client._create_properties(self.client.config.user_agent), # pylint: disable=protected-access loop=self.loop) self._outcome = None self._condition = None @@ -94,21 +87,10 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): async def _open(self): """ - Open the Sender using the supplied conneciton. + Open the EventSender using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. - :param connection: The underlying client shared connection. - :type: connection: ~uamqp.async_ops.connection_async.ConnectionAsync - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_sender_open] - :end-before: [END eventhub_client_async_sender_open] - :language: python - :dedent: 4 - :caption: Open the Sender using the supplied conneciton. 
- """ if self.redirected: self.target = self.redirected.address @@ -120,99 +102,87 @@ async def _open(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent), + properties=self.client._create_properties(self.client.config.user_agent), # pylint: disable=protected-access loop=self.loop) if not self.running: - try: - await self._handler.open_async() - self.running = True - while not await self._handler.client_ready_async(): - await asyncio.sleep(0.05) - except errors.AuthenticationException: - log.info("Sender failed authentication. Retrying...") - await self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - await self.reconnect() - else: - log.info("Sender detached. Failed to connect") - error = ConnectError(str(shutdown), shutdown) - raise error - except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("Sender couldn't authenticate.", shutdown) - error = AuthenticationError(str(shutdown)) - raise error - else: - log.info("Sender connection error (%r).", shutdown) - error = ConnectError(str(shutdown)) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r)", e) - error = EventHubError("Sender connect failed: {}".format(e)) - raise error + await self._connect() + self.running = True - async def _reconnect(self): - await self._handler.close_async() - unsent_events = self._handler.pending_messages - self._handler = SendClientAsync( - self.target, - auth=self.client.get_auth(), - debug=self.client.config.network_tracing, - msg_timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - 
properties=self.client.create_properties(self.client.config.user_agent), - loop=self.loop) + async def _connect(self): + connected = await self._build_connection() + if not connected: + await asyncio.sleep(self.reconnect_backoff) + while not await self._build_connection(is_reconnect=True): + await asyncio.sleep(self.reconnect_backoff) + + async def _build_connection(self, is_reconnect=False): + """ + + :param is_reconnect: True - trying to reconnect after fail to connect or a connection is lost. + False - the 1st time to connect + :return: True - connected. False - not connected + """ + # pylint: disable=protected-access + if is_reconnect: + await self._handler.close_async() + self._handler = SendClientAsync( + self.target, + auth=self.client.get_auth(), + debug=self.client.config.network_tracing, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client._create_properties(self.client.config.user_agent)) try: await self._handler.open_async() while not await self._handler.client_ready_async(): await asyncio.sleep(0.05) - self._handler.queue_message(*unsent_events) - await self._handler.wait_async() return True except errors.AuthenticationException as shutdown: - log.info("AsyncSender disconnected due to token expiry. Shutting down.") - error = AuthenticationError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + if is_reconnect: + log.info("EventSender couldn't authenticate. Shutting down. (%r)", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventSender couldn't authenticate. Attempting reconnect.") + return False except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") + if shutdown.action.retry: + log.info("EventSender detached. 
Attempting reconnect.") return False - log.info("AsyncSender reconnect failed. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - await self.close(exception=error) - raise error + else: + log.info("EventSender detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("AsyncSender detached. Attempting reconnect.") + if is_reconnect: + log.info("EventSender detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventSender detached. Attempting reconnect.") return False - log.info("AsyncSender reconnect failed. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - await self.close(exception=error) - raise error except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("AsyncSender couldn't authenticate. Attempting reconnect.") + if is_reconnect: + log.info("EventSender connection error (%r). Shutting down.", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventSender couldn't authenticate. Attempting reconnect.") return False - log.info("AsyncSender connection error (%r). Shutting down.", shutdown) - error = ConnectError(str(shutdown)) - await self.close(exception=error) - raise error except Exception as e: log.info("Unexpected error occurred (%r). 
Shutting down.", e) - error = EventHubError("Sender reconnect failed: {}".format(e)) + error = EventHubError("EventSender Reconnect failed: {}".format(e)) await self.close(exception=error) raise error - async def reconnect(self): - """If the Receiver was disconnected from the service with - a retryable error - attempt to reconnect.""" - while not await self._reconnect(): - await asyncio.sleep(self.reconnect_backoff) + async def _reconnect(self): + return await self._build_connection(is_reconnect=True) async def close(self, exception=None): """ @@ -248,85 +218,130 @@ async def close(self, exception=None): self.error = EventHubError("This send handler is now closed.") await self._handler.close_async() - async def _send_event_data(self, event_data): + async def _send_event_data(self): await self._open() - try: - self._handler.send_message(event_data.message) - if self._outcome != MessageSendResult.Ok: - raise Sender._error(self._outcome, self._condition) - except errors.MessageException as failed: - error = EventDataError(str(failed), failed) - await self.close(exception=error) - raise error - except (errors.TokenExpired, errors.AuthenticationException): - log.info("Sender disconnected due to token error. Attempting reconnect.") - await self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - await self.reconnect() - else: - log.info("Sender detached. 
Shutting down.") - error = ConnectError(str(shutdown), shutdown) + max_retries = self.client.config.max_retries + connecting_count = 0 + while True: + connecting_count += 1 + try: + if self.unsent_events: + self._handler.queue_message(*self.unsent_events) + await self._handler.wait_async() + self.unsent_events = self._handler.pending_messages + if self._outcome != constants.MessageSendResult.Ok: + EventSender._error(self._outcome, self._condition) + return + except (errors.MessageAccepted, + errors.MessageAlreadySettled, + errors.MessageModified, + errors.MessageRejected, + errors.MessageReleased, + errors.MessageContentTooLarge) as msg_error: + raise EventDataError(str(msg_error), msg_error) + except errors.MessageException as failed: + log.info("Send event data error (%r)", failed) + error = EventDataSendError(str(failed), failed) await self.close(exception=error) raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - await self.reconnect() - else: - log.info("Sender detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) + except errors.AuthenticationException as auth_error: + if connecting_count < max_retries: + log.info("EventSender disconnected due to token error. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventSender authentication failed. Shutting down.") + error = AuthenticationError(str(auth_error), auth_error) + await self.close(auth_error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry: + log.info("EventSender detached. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventSender detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if connecting_count < max_retries: + log.info("EventSender detached. 
Attempting reconnect.") + await self._reconnect() + else: + log.info("EventSender detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + await self.close(error) + raise error + except errors.AMQPConnectionError as shutdown: + if connecting_count < max_retries: + log.info("EventSender connection lost. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventSender connection lost. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + await self.close(error) + raise error + except compat.TimeoutException as shutdown: + if connecting_count < max_retries: + log.info("EventSender timed out sending event data. Attempting reconnect.") + await self._reconnect() + else: + log.info("EventSender timed out. Shutting down.") + await self.close(shutdown) + raise TimeoutError(str(shutdown), shutdown) + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Send failed: {}".format(e)) await self.close(exception=error) raise error - except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Send failed: {}".format(e)) - await self.close(exception=error) - raise error - else: - return self._outcome + + def _check_closed(self): + if self.error: + raise EventHubError("This sender has been closed. Please create a new sender to send event data.", + self.error) @staticmethod - def _set_batching_label(event_datas, batching_label): + def _set_partition_key(event_datas, partition_key): ed_iter = iter(event_datas) for ed in ed_iter: - ed._batching_label = batching_label + ed._set_partition_key(partition_key) yield ed - async def send(self, event_data, batching_label=None): + async def send(self, event_data, partition_key=None): """ Sends an event data and blocks until acknowledgement is received or operation times out. :param event_data: The event to be sent. 
:type event_data: ~azure.eventhub.common.EventData + :param partition_key: With the given partition_key, event data will land to + a particular partition of the Event Hub decided by the service. + :type partition_key: str :raises: ~azure.eventhub.common.EventHubError if the message fails to send. - :return: The outcome of the message send. - :rtype: ~uamqp.constants.MessageSendResult + :return: None + :rtype: None Example: .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_sync_send] - :end-before: [END eventhub_client_sync_send] + :start-after: [START eventhub_client_async_send] + :end-before: [END eventhub_client_async_send] :language: python :dedent: 4 :caption: Sends an event data and blocks until acknowledgement is received or operation times out. """ - if self.error: - raise self.error + self._check_closed() if isinstance(event_data, EventData): - if batching_label: - event_data._batching_label = batching_label + if partition_key: + event_data._set_partition_key(partition_key) wrapper_event_data = event_data else: wrapper_event_data = _BatchSendEventData( - self._set_batching_label(event_data, batching_label), - batching_label=batching_label) if batching_label else _BatchSendEventData(event_data) + self._set_partition_key(event_data, partition_key), + partition_key=partition_key) if partition_key else _BatchSendEventData(event_data) wrapper_event_data.message.on_send_complete = self._on_outcome - await self._send_event_data(wrapper_event_data) + self.unsent_events = [wrapper_event_data.message] + await self._send_event_data() def _on_outcome(self, outcome, condition): """ @@ -334,10 +349,13 @@ def _on_outcome(self, outcome, condition): :param outcome: The outcome of the message delivery - success or failure. :type outcome: ~uamqp.constants.MessageSendResult + :param condition: Detail information of the outcome. 
+ """ self._outcome = outcome self._condition = condition @staticmethod def _error(outcome, condition): - return None if outcome == MessageSendResult.Ok else EventHubError(outcome, condition) + if outcome != MessageSendResult.Ok: + raise condition diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 8fb7940850e9..f77dccf19203 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -15,6 +15,7 @@ from urllib import unquote_plus, urlencode, quote_plus except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus +from typing import Any, List, Dict import uamqp from uamqp import Message, AMQPClient @@ -22,12 +23,12 @@ from uamqp import constants from azure.eventhub import __version__ -from azure.eventhub.sender import Sender -from azure.eventhub.receiver import Receiver -from azure.eventhub.common import parse_sas_token +from azure.eventhub.sender import EventSender +from azure.eventhub.receiver import EventReceiver +from azure.eventhub.common import parse_sas_token, EventPosition from azure.eventhub.error import EventHubError from .client_abstract import EventHubClientAbstract -from .common import SASTokenCredentials, SharedKeyCredentials +from .common import EventHubSASTokenCredential, EventHubSharedKeyCredential log = logging.getLogger(__name__) @@ -63,7 +64,7 @@ def _create_auth(self, username=None, password=None): auth_timeout = self.config.auth_timeout # TODO: the following code can be refactored to create auth from classes directly instead of using if-else - if isinstance(self.credential, SharedKeyCredentials): + if isinstance(self.credential, EventHubSharedKeyCredential): username = username or self._auth_config['username'] password = password or self._auth_config['password'] if "@sas.root" in username: @@ -73,7 +74,7 @@ def _create_auth(self, username=None, password=None): 
self.auth_uri, username, password, timeout=auth_timeout, http_proxy=http_proxy, transport_type=transport_type) - elif isinstance(self.credential, SASTokenCredentials): + elif isinstance(self.credential, EventHubSASTokenCredential): token = self.credential.get_sas_token() try: expiry = int(parse_sas_token(token)['se']) @@ -93,16 +94,14 @@ def _create_auth(self, username=None, password=None): get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) - def get_properties(self): + # type:() -> Dict[str, Any] """ - Get details on the specified EventHub. + Get properties of the specified EventHub. Keys in the details dictionary include: - -'name' - -'type' + -'path' -'created_at' - -'partition_count' -'partition_ids' :rtype: dict @@ -132,21 +131,27 @@ def get_properties(self): mgmt_client.close() def get_partition_ids(self): + # type:() -> List[str] + """ + Get partition ids of the specified EventHub. + + :rtype: list[str] + """ return self.get_properties()['partition_ids'] def get_partition_properties(self, partition): + # type:(str) -> Dict[str, str] """ - Get information on the specified partition async. + Get properties of the specified partition. Keys in the details dictionary include: - -'name' - -'type' - -'partition' - -'begin_sequence_number' + -'event_hub_path' + -'id' + -'beginning_sequence_number' -'last_enqueued_sequence_number' -'last_enqueued_offset' -'last_enqueued_time_utc' - -'is_partition_empty' + -'is_empty' :param partition: The target partition id. 
:type partition: str @@ -171,7 +176,6 @@ def get_partition_properties(self, partition): output = {} if partition_info: output['event_hub_path'] = partition_info[b'name'].decode('utf-8') - # output['type'] = partition_info[b'type'].decode('utf-8') output['id'] = partition_info[b'partition'].decode('utf-8') output['beginning_sequence_number'] = partition_info[b'begin_sequence_number'] output['last_enqueued_sequence_number'] = partition_info[b'last_enqueued_sequence_number'] @@ -184,24 +188,28 @@ def get_partition_properties(self, partition): mgmt_client.close() def create_receiver( - self, partition_id, consumer_group="$Default", event_position=None, exclusive_receiver_priority=None, operation=None, - prefetch=None, + self, partition_id, consumer_group="$Default", event_position=EventPosition.first_available_event(), + exclusive_receiver_priority=None, operation=None, prefetch=None, ): + # type: (str, str, EventPosition, int, str, int) -> EventReceiver """ - Add a receiver to the client for a particular consumer group and partition. + Create a receiver to the client for a particular consumer group and partition. - :param consumer_group: The name of the consumer group. + :param partition_id: The ID of the partition. + :type partition_id: str + :param consumer_group: The name of the consumer group. Default value is `$Default`. :type consumer_group: str - :param partition: The ID of the partition. - :type partition: str :param event_position: The position from which to start receiving. :type event_position: ~azure.eventhub.common.EventPosition - :param prefetch: The message prefetch count of the receiver. Default is 300. - :type prefetch: int - :operation: An optional operation to be appended to the hostname in the source URL. + :param exclusive_receiver_priority: The priority of the exclusive receiver. The client will create an exclusive + receiver if exclusive_receiver_priority is set. 
+ :type exclusive_receiver_priority: int + :param operation: An optional operation to be appended to the hostname in the source URL. The value must start with `/` character. :type operation: str - :rtype: ~azure.eventhub.receiver.Receiver + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :rtype: ~azure.eventhub.receiver.EventReceiver Example: .. literalinclude:: ../examples/test_examples_eventhub.py @@ -217,32 +225,27 @@ def create_receiver( path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) - handler = Receiver( + handler = EventReceiver( self, source_url, event_position=event_position, exclusive_receiver_priority=exclusive_receiver_priority, prefetch=prefetch) return handler def create_sender(self, partition_id=None, operation=None, send_timeout=None): + # type: (str, str, float) -> EventSender """ - Add a sender to the client to send EventData object to an EventHub. + Create a sender to the client to send EventData object to an EventHub. - :param partition: Optionally specify a particular partition to send to. + :param partition_id: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via round-robin. - :type parition: str - :operation: An optional operation to be appended to the hostname in the target URL. + :type partition_id: str + :param operation: An optional operation to be appended to the hostname in the target URL. The value must start with `/` character. :type operation: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. 
:type send_timeout: int - :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during - periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not - be pinged. - :type keep_alive: int - :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. - Default value is `True`. - :rtype: ~azure.eventhub.sender.Sender + :rtype: ~azure.eventhub.sender.EventSender Example: .. literalinclude:: ../examples/test_examples_eventhub.py @@ -258,6 +261,6 @@ def create_sender(self, partition_id=None, operation=None, send_timeout=None): target = target + operation send_timeout = self.config.send_timeout if send_timeout is None else send_timeout - handler = Sender( + handler = EventSender( self, target, partition=partition_id, send_timeout=send_timeout) return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 26435fd93635..5534a848c640 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -20,7 +20,7 @@ from azure.eventhub import __version__ from azure.eventhub.configuration import Configuration from azure.eventhub import constants -from .common import SASTokenCredentials, SharedKeyCredentials, Address +from .common import EventHubSharedKeyCredential, _Address log = logging.getLogger(__name__) @@ -82,35 +82,25 @@ def _build_uri(address, entity): class EventHubClientAbstract(object): """ - The EventHubClient class defines a high level interface for sending + The EventHubClientAbstract class defines a high level interface for sending events to and receiving events from the Azure Event Hubs service. - - Example: - .. 
literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client] - :end-before: [END create_eventhub_client] - :language: python - :dedent: 4 - :caption: Create a new instance of the Event Hub client - """ def __init__(self, host, event_hub_path, credential, **kwargs): """ - Constructs a new EventHubClient with the given address URL. - - :param address: The full URI string of the Event Hub. This can optionally - include URL-encoded access name and key. - :type address: str - :param username: The name of the shared access policy. This must be supplied - if not encoded into the address. - :type username: str - :param password: The shared access key. This must be supplied if not encoded - into the address. - :type password: str - :param debug: Whether to output network trace logs to the logger. Default + Constructs a new EventHubClient. + + :param host: The hostname URI string of the the Event Hub. + :type host: str + :param event_hub_path: The path/name of the Event Hub + :type event_hub_path: str + :param network_tracing: Whether to output network trace logs to the logger. Default is `False`. - :type debug: bool + :type network_tracing: bool + :param credential: The credential object used for authentication which implements particular interface + of getting tokens. It accepts ~azure.eventhub.EventHubSharedKeyCredential, + ~azure.eventhub.EventHubSASTokenCredential, credential objects generated by the azure-identity library and + objects that implement get token interface. :param http_proxy: HTTP proxy settings. This must be a dictionary with the following keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). Additionally the following keys may also be present: 'username', 'password'. @@ -118,18 +108,34 @@ def __init__(self, host, event_hub_path, credential, **kwargs): :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. The default value is 60 seconds. 
If set to 0, no timeout will be enforced from the client. :type auth_timeout: int - :param sas_token: A SAS token or function that returns a SAS token. If a function is supplied, - it will be used to retrieve subsequent tokens in the case of token expiry. The function should - take no arguments. - :type sas_token: str or callable + :param user_agent: The user agent that needs to be appended to the built in user agent string. + :type user_agent: str + :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default + value is 3. + :type max_retries: int + :param transport_type: The transport protocol type - default is ~uamqp.TransportType.Amqp. + ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the + transport type is explicitly requested. + :type transport_type: ~azure.eventhub.TransportType + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but + will return as soon as service returns no new events. Default value is the same as prefetch. + :type max_batch_size: int + :param receive_timeout: The timeout time in seconds to receive a batch of events from an Event Hub. + Default value is 0 seconds. + :type receive_timeout: int + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. 
+ :type send_timeout: int """ self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] - self.address = Address() + self.address = _Address() self.address.hostname = host self.address.path = "/" + event_hub_path if event_hub_path else "" self._auth_config = {} self.credential = credential - if isinstance(credential, SharedKeyCredentials): + if isinstance(credential, EventHubSharedKeyCredential): self.username = credential.policy self.password = credential.key self._auth_config['username'] = self.username @@ -148,24 +154,44 @@ def __init__(self, host, event_hub_path, credential, **kwargs): log.info("%r: Created the Event Hub client", self.container_id) @classmethod - def from_connection_string(cls, conn_str, eventhub=None, **kwargs): + def from_connection_string(cls, conn_str, event_hub_path=None, **kwargs): """Create an EventHubClient from a connection string. :param conn_str: The connection string. :type conn_str: str - :param eventhub: The name of the EventHub, if the EntityName is + :param event_hub_path: The path/name of the Event Hub, if the EntityName is not included in the connection string. - :type eventhub: str - :param debug: Whether to output network trace logs to the logger. Default + :type event_hub_path: str + :param network_tracing: Whether to output network trace logs to the logger. Default is `False`. - :type debug: bool + :type network_tracing: bool :param http_proxy: HTTP proxy settings. This must be a dictionary with the following keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). Additionally the following keys may also be present: 'username', 'password'. :type http_proxy: dict[str, Any] :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: int + :type auth_timeout: float + :param user_agent: The user agent that needs to be appended to the built in user agent string. 
+ :type user_agent: str + :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default + value is 3. + :type max_retries: int + :param transport_type: The transport protocol type - default is ~uamqp.TransportType.Amqp. + ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the + transport type is explicitly requested. + :type transport_type: ~azure.eventhub.TransportType + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but + will return as soon as service returns no new events. Default value is the same as prefetch. + :type max_batch_size: int + :param receive_timeout: The timeout time in seconds to receive a batch of events from an Event Hub. + Default value is 0 seconds. + :type receive_timeout: float + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: float Example: .. literalinclude:: ../examples/test_examples_eventhub.py @@ -177,13 +203,13 @@ def from_connection_string(cls, conn_str, eventhub=None, **kwargs): """ address, policy, key, entity = _parse_conn_str(conn_str) - entity = eventhub or entity + entity = event_hub_path or entity left_slash_pos = address.find("//") if left_slash_pos != -1: host = address[left_slash_pos + 2:] else: host = address - return cls(host, entity, SharedKeyCredentials(policy, key), **kwargs) + return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) @classmethod def from_iothub_connection_string(cls, conn_str, **kwargs): @@ -192,16 +218,36 @@ def from_iothub_connection_string(cls, conn_str, **kwargs): :param conn_str: The connection string. :type conn_str: str - :param debug: Whether to output network trace logs to the logger. 
Default + :param network_tracing: Whether to output network trace logs to the logger. Default is `False`. - :type debug: bool + :type network_tracing: bool :param http_proxy: HTTP proxy settings. This must be a dictionary with the following keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). Additionally the following keys may also be present: 'username', 'password'. :type http_proxy: dict[str, Any] :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: int + :type auth_timeout: float + :param user_agent: The user agent that needs to be appended to the built in user agent string. + :type user_agent: str + :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default + value is 3. + :type max_retries: int + :param transport_type: The transport protocol type - default is ~uamqp.TransportType.Amqp. + ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the + transport type is explicitly requested. + :type transport_type: ~azure.eventhub.TransportType + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but + will return as soon as service returns no new events. Default value is the same as prefetch. + :type max_batch_size: int + :param receive_timeout: The timeout time in seconds to receive a batch of events from an Event Hub. + Default value is 0 seconds. + :type receive_timeout: float + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: float Example: .. 
literalinclude:: ../examples/test_examples_eventhub.py @@ -221,7 +267,7 @@ def from_iothub_connection_string(cls, conn_str, **kwargs): host = address[left_slash_pos + 2:] else: host = address - client = cls(host, "", SharedKeyCredentials(username, password), **kwargs) + client = cls(host, "", EventHubSharedKeyCredential(username, password), **kwargs) client._auth_config = { # pylint: disable=protected-access 'iot_username': policy, 'iot_password': key, @@ -233,7 +279,7 @@ def from_iothub_connection_string(cls, conn_str, **kwargs): def _create_auth(self, username=None, password=None): pass - def create_properties(self, user_agent=None): # pylint: disable=no-self-use + def _create_properties(self, user_agent=None): # pylint: disable=no-self-use """ Format the properties with which to instantiate the connection. This acts like a user agent over HTTP. @@ -270,10 +316,12 @@ def _process_redirect_uri(self, redirect): @abstractmethod def create_receiver( - self, consumer_group, partition, epoch=None, offset=None, prefetch=300, - operation=None): + self, partition_id, consumer_group="$Default", event_position=None, exclusive_receiver_priority=None, + operation=None, + prefetch=None, + ): pass @abstractmethod - def create_sender(self, partition=None, operation=None, send_timeout=60): + def create_sender(self, partition_id=None, operation=None, send_timeout=None): pass diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 3af21e5d2e86..80469a229e2d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -82,7 +82,7 @@ def __init__(self, body=None, to_device=None, message=None): self._annotations = message.annotations self._app_properties = message.application_properties else: - if isinstance(body, list) and body: + if body and isinstance(body, list): self.message = Message(body[0], properties=self.msg_properties) for more in 
body[1:]: self.message._body.append(more) # pylint: disable=protected-access @@ -105,11 +105,11 @@ def __str__(self): dic['enqueued_time'] = str(self.enqueued_time) if self.device_id: dic['device_id'] = str(self.device_id) - if self._batching_label: - dic['_batching_label'] = str(self._batching_label) + if self.partition_key: + dic['partition_key'] = str(self.partition_key) + return str(dic) - return str(dic) @property def sequence_number(self): @@ -155,7 +155,7 @@ def device_id(self): return self._annotations.get(EventData.PROP_DEVICE_ID, None) @property - def _batching_label(self): + def partition_key(self): """ The partition key of the event data object. @@ -166,8 +166,7 @@ def _batching_label(self): except KeyError: return self._annotations.get(EventData.PROP_PARTITION_KEY, None) - @_batching_label.setter - def _batching_label(self, value): + def _set_partition_key(self, value): """ Set the partition key of the event data object. @@ -256,11 +255,11 @@ def encode_message(self): class _BatchSendEventData(EventData): - def __init__(self, batch_event_data, batching_label=None): + def __init__(self, batch_event_data, partition_key=None): self.message = BatchMessage(data=batch_event_data, multi_messages=False, properties=None) - self.set_batching_label(batching_label) + self._set_partition_key(partition_key) - def set_batching_label(self, value): + def _set_partition_key(self, value): if value: annotations = self.message.annotations if annotations is None: @@ -292,9 +291,9 @@ class EventPosition(object): def __init__(self, value, inclusive=False): """ - Initialize Offset. + Initialize EventPosition. - :param value: The offset value. + :param value: The event position value. :type value: ~datetime.datetime or int or str :param inclusive: Whether to include the supplied value as the start point. 
:type inclusive: bool @@ -305,7 +304,7 @@ def __init__(self, value, inclusive=False): def __str__(self): return str(self.value) - def selector(self): + def _selector(self): """ Creates a selector expression of the offset. @@ -319,34 +318,81 @@ def selector(self): return ("amqp.annotation.x-opt-sequence-number {} '{}'".format(operator, self.value)).encode('utf-8') return ("amqp.annotation.x-opt-offset {} '{}'".format(operator, self.value)).encode('utf-8') - @staticmethod - def first_available(): - return FIRST_AVAILABLE + @classmethod + def first_available_event(cls): + """ + Get the beginning of the event stream. + + :rtype: azure.eventhub.common.EventPosition + """ + + return cls("-1") @classmethod def new_events_only(cls): - return NEW_EVENTS_ONLY + """ + Get the end of the event stream. + + :rtype: azure.eventhub.common.EventPosition + """ - @staticmethod - def from_offset(offset, inclusive=False): - return EventPosition(offset, inclusive) + return cls("@latest") - @staticmethod - def from_sequence(sequence, inclusive=False): - return EventPosition(sequence, inclusive) + @classmethod + def from_offset(cls, offset, inclusive=False): + """ + Get the event position from/after the specified offset. - @staticmethod - def from_enqueued_time(enqueued_time, inclusive=False): - return EventPosition(enqueued_time, inclusive) + :param offset: the offset value + :type offset: str + :param inclusive: Whether to include the supplied value as the start point. + :type inclusive: bool + :rtype: azure.eventhub.common.EventPosition + """ + return cls(offset, inclusive) -FIRST_AVAILABLE = EventPosition("-1") -NEW_EVENTS_ONLY = EventPosition("@latest") + @classmethod + def from_sequence(cls, sequence, inclusive=False): + """ + Get the event position from/after the specified sequence number. + + :param sequence: the sequence number + :type sequence: int, long + :param inclusive: Whether to include the supplied value as the start point. 
+ :type inclusive: bool + :rtype: azure.eventhub.common.EventPosition + """ + + return cls(sequence, inclusive) + + @classmethod + def from_enqueued_time(cls, enqueued_time, inclusive=False): + """ + Get the event position from/after the specified enqueue time. + + :param enqueued_time: the enqueue datetime + :type enqueued_time: datetime.datetime + :param inclusive: Whether to include the supplied value as the start point. + :type inclusive: bool + :rtype: azure.eventhub.common.EventPosition + """ + + return cls(enqueued_time, inclusive) # TODO: move some behaviors to these two classes. -class SASTokenCredentials(object): +class EventHubSASTokenCredential(object): + """ + SAS token used for authentication. + """ def __init__(self, token): + """ + :param token: A SAS token or function that returns a SAS token. If a function is supplied, + it will be used to retrieve subsequent tokens in the case of token expiry. The function should + take no arguments. + :type token: str or callable + """ self.token = token def get_sas_token(self): @@ -356,13 +402,23 @@ def get_sas_token(self): return self.token -class SharedKeyCredentials(object): +class EventHubSharedKeyCredential(object): + """ + The shared access key credential used for authentication. + """ def __init__(self, policy, key): + """ + :param policy: The name of the shared access policy. + :type policy: str + :param key: The shared access key. 
+ :type key: str + """ + self.policy = policy self.key = key -class Address(object): +class _Address(object): def __init__(self, hostname=None, path=None): self.hostname = hostname self.path = path diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index b6e030c9e3a6..72211d69076e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -16,6 +16,6 @@ def __init__(self, **kwargs): else kwargs.get("transport_type", TransportType.Amqp) self.auth_timeout = kwargs.get("auth_timeout", 60) self.prefetch = kwargs.get("prefetch", 300) - self.max_batch_size = kwargs.get("max_batch_size") + self.max_batch_size = kwargs.get("max_batch_size", self.prefetch) self.receive_timeout = kwargs.get("receive_timeout", 0) self.send_timeout = kwargs.get("send_timeout", 60) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index 69aaa701496b..a93d052af4fe 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -5,7 +5,7 @@ from uamqp import types, constants, errors import six -from azure.core import AzureError +from azure.core.exceptions import AzureError _NO_RETRY_ERRORS = ( b"com.microsoft:argument-out-of-range", @@ -15,6 +15,7 @@ b"com.microsoft:argument-error" ) + def _error_handler(error): """ Called internally when an event has failed to send so we @@ -95,14 +96,46 @@ def _parse_error(self, error_list): self.details = details -class AuthenticationError(EventHubError): +class ConnectionLostError(EventHubError): + """Connection to event hub is lost. SDK will retry. So this shouldn't happen. 
+ + """ pass class ConnectError(EventHubError): + """Fail to connect to event hubs + + """ + pass + + +class AuthenticationError(ConnectError): + """Fail to connect to event hubs because of authentication problem + + + """ pass class EventDataError(EventHubError): + """Problematic event data so the send will fail at client side + + """ + pass + + +class EventDataSendError(EventHubError): + """Service returns error while an event data is being sent + + """ pass +''' +class ConnectionTimeoutError(ConnectError): + """Time out when accessing event hub service + Should retry? + + """ +''' + diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index 4643cb29419f..ac122b9e1881 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -9,32 +9,26 @@ import time from uamqp import types, errors +from uamqp import compat from uamqp import ReceiveClient, Source from azure.eventhub.common import EventData -from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, _error_handler +from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, ConnectionLostError, _error_handler log = logging.getLogger(__name__) -class Receiver(object): +class EventReceiver(object): """ - Implements a Receiver. - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_receiver_instance] - :end-before: [END create_eventhub_client_receiver_instance] - :language: python - :dedent: 4 - :caption: Create a new instance of the Receiver. + Implements a EventReceiver. 
""" timeout = 0 _epoch = b'com.microsoft:epoch' - def __init__(self, client, source, event_position=None, prefetch=300, exclusive_receiver_priority=None, keep_alive=None, auto_reconnect=True): + def __init__(self, client, source, event_position=None, prefetch=300, exclusive_receiver_priority=None, + keep_alive=None, auto_reconnect=True): """ Instantiate a receiver. @@ -45,8 +39,9 @@ def __init__(self, client, source, event_position=None, prefetch=300, exclusive_ :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param epoch: An optional epoch value. - :type epoch: int + :param exclusive_receiver_priority: The priority of the exclusive receiver. It will an exclusive + receiver if exclusive_receiver_priority is set. + :type exclusive_receiver_priority: int """ self.running = False self.client = client @@ -66,7 +61,7 @@ def __init__(self, client, source, event_position=None, prefetch=300, exclusive_ self.name = "EHReceiver-{}-partition{}".format(uuid.uuid4(), partition) source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset.selector()) + source.set_filter(self.offset._selector()) # pylint: disable=protected-access if exclusive_receiver_priority: self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(exclusive_receiver_priority))} self._handler = ReceiveClient( @@ -79,7 +74,7 @@ def __init__(self, client, source, event_position=None, prefetch=300, exclusive_ error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent)) + properties=self.client._create_properties(self.client.config.user_agent)) # pylint: disable=protected-access def __enter__(self): return self @@ -92,7 +87,10 @@ def __iter__(self): def __next__(self): self._open() + max_retries = self.client.config.max_retries + connecting_count = 0 while True: + connecting_count += 1 try: if not 
self.messages_iter: self.messages_iter = self._handler.receive_messages_iter() @@ -100,59 +98,90 @@ def __next__(self): event_data = EventData(message=message) self.offset = event_data.offset return event_data - except (errors.TokenExpired, errors.AuthenticationException): - log.info("Receiver disconnected due to token error. Attempting reconnect.") - self.reconnect() + except errors.AuthenticationException as auth_error: + if connecting_count < max_retries: + log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + self._reconnect() + else: + log.info("EventReceiver authentication failed. Shutting down.") + error = AuthenticationError(str(auth_error), auth_error) + self.close(auth_error) + raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - self.reconnect() + log.info("EventReceiver detached. Attempting reconnect.") + self._reconnect() else: - log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) + log.info("EventReceiver detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - self.reconnect() + if connecting_count < max_retries: + log.info("EventReceiver detached. Attempting reconnect.") + self._reconnect() else: - log.info("Receiver detached. Shutting down.") - error = EventHubError(str(shutdown), shutdown) - self.close(exception=error) + log.info("EventReceiver detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + self.close(error) + raise error + except errors.AMQPConnectionError as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver connection lost. 
Attempting reconnect.") + self._reconnect() + else: + log.info("EventReceiver connection lost. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + self.close(error) raise error + except compat.TimeoutException as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + self._reconnect() + else: + log.info("EventReceiver timed out. Shutting down.") + self.close(shutdown) + raise TimeoutError(str(shutdown), shutdown) except StopIteration: raise + except KeyboardInterrupt: + log.info("EventReceiver stops due to keyboard interrupt") + print("EventReceiver stopped") + self.close() + raise except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) self.close(exception=error) raise error + def _check_closed(self): + if self.error: + raise EventHubError("This receiver has been closed. Please create a new receiver to receive event data.", + self.error) + + def _redirect(self, redirect): + self.redirected = redirect + self.running = False + self.messages_iter = None + self._open() + def _open(self): """ - Open the Receiver using the supplied conneciton. + Open the EventReceiver using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. - :param connection: The underlying client shared connection. - :type: connection: ~uamqp.connection.Connection - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_receiver_open] - :end-before: [END eventhub_client_receiver_open] - :language: python - :dedent: 4 - :caption: Open the Receiver using the supplied conneciton. 
- """ # pylint: disable=protected-access + self._check_closed() if self.redirected: + self.client._process_redirect_uri(self.redirected) self.source = self.redirected.address source = Source(self.source) if self.offset is not None: - source.set_filter(self.offset.selector()) + source.set_filter(self.offset._selector()) + alt_creds = { "username": self.client._auth_config.get("iot_username"), "password":self.client._auth_config.get("iot_password")} @@ -166,105 +195,99 @@ def _open(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent)) + properties=self.client._create_properties(self.client.config.user_agent)) # pylint: disable=protected-access if not self.running: - try: - self._handler.open() - self.running = True - while not self._handler.client_ready(): - time.sleep(0.05) - - except errors.AuthenticationException: - log.info("Receiver failed authentication. Retrying...") - self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - self.reconnect() - else: - log.info("Receiver detached. 
Failed to connect") - error = ConnectError(str(shutdown), shutdown) - raise error - except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("Receiver couldn't authenticate (%r).", shutdown) - error = AuthenticationError(str(shutdown), shutdown) - raise error - else: - log.info("Receiver connection error (%r).", shutdown) - error = ConnectError(str(shutdown), shutdown) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r)", e) - error = EventHubError("Receiver connect failed: {}".format(e)) - raise error + self._connect() + self.running = True + + def _connect(self): + connected = self._build_connection() + if not connected: + time.sleep(self.reconnect_backoff) + while not self._build_connection(is_reconnect=True): + time.sleep(self.reconnect_backoff) + + def _build_connection(self, is_reconnect=False): + """ - def _reconnect(self): # pylint: disable=too-many-statements + :param is_reconnect: True - trying to reconnect after fail to connect or a connection is lost. + False - the 1st time to connect + :return: True - connected. 
False - not connected + """ # pylint: disable=protected-access - alt_creds = { - "username": self.client._auth_config.get("iot_username"), - "password": self.client._auth_config.get("iot_password")} - self._handler.close() - source = Source(self.source) - if self.offset is not None: - source.set_filter(self.offset.selector()) - self._handler = ReceiveClient( - source, - auth=self.client.get_auth(**alt_creds), - debug=self.client.config.network_tracing, - prefetch=self.prefetch, - link_properties=self.properties, - timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent)) - self.messages_iter = None + if is_reconnect: + alt_creds = { + "username": self.client._auth_config.get("iot_username"), + "password": self.client._auth_config.get("iot_password")} + self._handler.close() + source = Source(self.source) + if self.offset is not None: + source.set_filter(self.offset._selector()) + self._handler = ReceiveClient( + source, + auth=self.client.get_auth(**alt_creds), + debug=self.client.config.network_tracing, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client._create_properties( + self.client.config.user_agent)) # pylint: disable=protected-access + self.messages_iter = None try: self._handler.open() while not self._handler.client_ready(): time.sleep(0.05) return True except errors.AuthenticationException as shutdown: - log.info("Receiver disconnected due to token expiry. Shutting down.") - error = AuthenticationError(str(shutdown), shutdown) - self.close(exception=error) - raise error + if is_reconnect: + log.info("EventReceiver couldn't authenticate. Shutting down. 
(%r)", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventReceiver couldn't authenticate. Attempting reconnect.") + return False + except errors.LinkRedirect as redirect: + self._redirect(redirect) + return True except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") + if shutdown.action.retry: + log.info("EventReceiver detached. Attempting reconnect.") return False - log.info("Receiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - self.close(exception=error) - raise error + else: + log.info("EventReceiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + self.close(exception=error) + raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") + if is_reconnect: + log.info("EventReceiver detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventReceiver detached. Attempting reconnect.") return False - log.info("Receiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - self.close(exception=error) - raise error except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("Receiver couldn't authenticate. Attempting reconnect.") + if is_reconnect: + log.info("EventReceiver connection error (%r). Shutting down.", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventReceiver couldn't authenticate. Attempting reconnect.") return False - log.info("Receiver connection error (%r). 
Shutting down.", shutdown) - error = ConnectError(str(shutdown), shutdown) - self.close(exception=error) - raise error except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Receiver reconnect failed: {}".format(e)) + error = EventHubError("EventReceiver reconnect failed: {}".format(e)) self.close(exception=error) raise error - def reconnect(self): - """If the Receiver was disconnected from the service with - a retryable error - attempt to reconnect.""" - while not self._reconnect(): - time.sleep(self.reconnect_backoff) + def _reconnect(self): + return self._build_connection(is_reconnect=True) def close(self, exception=None): """ @@ -285,6 +308,9 @@ def close(self, exception=None): :caption: Close down the handler. """ + if self.messages_iter: + self.messages_iter.close() + self.messages_iter = None self.running = False if self.error: return @@ -320,6 +346,11 @@ def receive(self, max_batch_size=None, timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. :type max_batch_size: int + :param timeout: The timeout time in seconds to receive a batch of events + from an Event Hub. Results will be returned after timeout. If combined + with max_batch_size, it will return after either the count of received events + reaches the max_batch_size or the operation has timed out. + :type timeout: int :rtype: list[~azure.eventhub.common.EventData] Example: @@ -331,45 +362,80 @@ def receive(self, max_batch_size=None, timeout=None): :caption: Receive events from the EventHub. 
""" - if self.error: - raise self.error + self._check_closed() self._open() + max_batch_size = min(self.client.config.max_batch_size, self.prefetch) if max_batch_size is None else max_batch_size + timeout = self.client.config.receive_timeout if timeout is None else timeout + data_batch = [] + max_retries = self.client.config.max_retries + connecting_count = 0 while True: + connecting_count += 1 try: timeout_ms = 1000 * timeout if timeout else 0 message_batch = self._handler.receive_message_batch( - max_batch_size=max_batch_size, + max_batch_size=max_batch_size - (len(data_batch) if data_batch else 0), timeout=timeout_ms) for message in message_batch: event_data = EventData(message=message) self.offset = event_data.offset data_batch.append(event_data) return data_batch - except (errors.TokenExpired, errors.AuthenticationException): - log.info("Receiver disconnected due to token error. Attempting reconnect.") - self.reconnect() + except errors.AuthenticationException as auth_error: + if connecting_count < max_retries: + log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + self._reconnect() + else: + log.info("EventReceiver authentication failed. Shutting down.") + error = AuthenticationError(str(auth_error), auth_error) + self.close(auth_error) + raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("Receiver detached. Attempting reconnect.") - self.reconnect() + log.info("EventReceiver detached. Attempting reconnect.") + self._reconnect() else: - log.info("Receiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) + log.info("EventReceiver detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Receiver detached. 
Attempting reconnect.") - self.reconnect() + if connecting_count < max_retries: + log.info("EventReceiver detached. Attempting reconnect.") + self._reconnect() else: - log.info("Receiver detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - self.close(exception=error) + log.info("EventReceiver detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + self.close(error) raise error + except errors.AMQPConnectionError as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver connection lost. Attempting reconnect.") + self._reconnect() + else: + log.info("EventReceiver connection lost. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + self.close(error) + raise error + except compat.TimeoutException as shutdown: + if connecting_count < max_retries: + log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + self._reconnect() + else: + log.info("EventReceiver timed out. Shutting down.") + self.close(shutdown) + raise TimeoutError(str(shutdown), shutdown) + except KeyboardInterrupt: + log.info("EventReceiver stops due to keyboard interrupt") + print("EventReceiver stopped") + self.close() + raise except Exception as e: log.info("Unexpected error occurred (%r). 
Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) self.close(exception=error) raise error + + next = __next__ # for python2.7 diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index ccb193835c20..5e6754281428 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -9,33 +9,26 @@ import time from uamqp import constants, errors +from uamqp import compat from uamqp import SendClient from uamqp.constants import MessageSendResult from azure.eventhub.common import EventData, _BatchSendEventData from azure.eventhub.error import EventHubError, ConnectError, \ - AuthenticationError, EventDataError, _error_handler + AuthenticationError, EventDataError, EventDataSendError, ConnectionLostError, _error_handler log = logging.getLogger(__name__) -class Sender(object): +class EventSender(object): """ - Implements a Sender. - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_sender_instance] - :end-before: [END create_eventhub_client_sender_instance] - :language: python - :dedent: 4 - :caption: Create a new instance of the Sender. + Implements a EventSender. """ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=None, auto_reconnect=True): """ - Instantiate an EventHub event Sender handler. + Instantiate an EventHub event EventSender handler. :param client: The parent EventHubClient. :type client: ~azure.eventhub.client.EventHubClient. @@ -46,10 +39,10 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N :type partition: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. 
- :type send_timeout: int + :type send_timeout: float :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during periods of inactivity. The default value is None, i.e. no keep alive pings. - :type keep_alive: int + :type keep_alive: float :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. Default value is `True`. :type auto_reconnect: bool @@ -66,6 +59,7 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) self.reconnect_backoff = 1 self.name = "EHSender-{}".format(uuid.uuid4()) + self.unsent_events = None if partition: self.target += "/Partitions/" + partition self.name += "-partition{}".format(partition) @@ -77,7 +71,7 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent)) + properties=self.client._create_properties(self.client.config.user_agent)) # pylint: disable=protected-access self._outcome = None self._condition = None @@ -89,22 +83,13 @@ def __exit__(self, exc_type, exc_val, exc_tb): def _open(self): """ - Open the Sender using the supplied conneciton. + Open the EventSender using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. - :param connection: The underlying client shared connection. - :type: connection: ~uamqp.connection.Connection - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_sender_open] - :end-before: [END eventhub_client_sender_open] - :language: python - :dedent: 4 - :caption: Open the Sender using the supplied conneciton. 
- """ + # pylint: disable=protected-access + self._check_closed() if self.redirected: self.target = self.redirected.address self._handler = SendClient( @@ -115,98 +100,86 @@ def _open(self): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent)) + properties=self.client._create_properties(self.client.config.user_agent)) if not self.running: - try: - self._handler.open() - self.running = True - while not self._handler.client_ready(): - time.sleep(0.05) - except errors.AuthenticationException: - log.info("Sender failed authentication. Retrying...") - self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - self.reconnect() - else: - log.info("Sender detached. Failed to connect") - error = ConnectError(str(shutdown), shutdown) - raise error - except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("Sender couldn't authenticate.", shutdown) - error = AuthenticationError(str(shutdown), shutdown) - raise error - else: - log.info("Sender connection error (%r).", shutdown) - error = ConnectError(str(shutdown), shutdown) - raise error - except Exception as e: - log.info("Unexpected error occurred (%r)", e) - error = EventHubError("Sender connect failed: {}".format(e)) - raise error + self._connect() + self.running = True - def _reconnect(self): + def _connect(self): + connected = self._build_connection() + if not connected: + time.sleep(self.reconnect_backoff) + while not self._build_connection(is_reconnect=True): + time.sleep(self.reconnect_backoff) + + def _build_connection(self, is_reconnect=False): + """ + + :param is_reconnect: True - trying to reconnect after fail to connect or a connection is lost. 
+ False - the 1st time to connect + :return: True - connected. False - not connected + """ # pylint: disable=protected-access - self._handler.close() - unsent_events = self._handler.pending_messages - self._handler = SendClient( - self.target, - auth=self.client.get_auth(), - debug=self.client.config.network_tracing, - msg_timeout=self.timeout, - error_policy=self.retry_policy, - keep_alive_interval=self.keep_alive, - client_name=self.name, - properties=self.client.create_properties(self.client.config.user_agent)) + if is_reconnect: + self._handler.close() + self._handler = SendClient( + self.target, + auth=self.client.get_auth(), + debug=self.client.config.network_tracing, + msg_timeout=self.timeout, + error_policy=self.retry_policy, + keep_alive_interval=self.keep_alive, + client_name=self.name, + properties=self.client._create_properties(self.client.config.user_agent)) try: self._handler.open() while not self._handler.client_ready(): time.sleep(0.05) - self._handler.queue_message(*unsent_events) - self._handler.wait() return True except errors.AuthenticationException as shutdown: - log.info("Sender disconnected due to token expiry. Shutting down.") - error = AuthenticationError(str(shutdown), shutdown) - self.close(exception=error) - raise error + if is_reconnect: + log.info("EventSender couldn't authenticate. Shutting down. (%r)", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventSender couldn't authenticate. Attempting reconnect.") + return False except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") + if shutdown.action.retry: + log.info("EventSender detached. Attempting reconnect.") return False - log.info("Sender reconnect failed. 
Shutting down.") - error = ConnectError(str(shutdown), shutdown) - self.close(exception=error) - raise error + else: + log.info("EventSender detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + self.close(exception=error) + raise error except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") + if is_reconnect: + log.info("EventSender detached. Shutting down.") + error = ConnectError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventSender detached. Attempting reconnect.") return False - log.info("Sender reconnect failed. Shutting down.") - error = ConnectError(str(shutdown), shutdown) - self.close(exception=error) - raise error except errors.AMQPConnectionError as shutdown: - if str(shutdown).startswith("Unable to open authentication session") and self.auto_reconnect: - log.info("Sender couldn't authenticate. Attempting reconnect.") + if is_reconnect: + log.info("EventSender connection error (%r). Shutting down.", shutdown) + error = AuthenticationError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventSender couldn't authenticate. Attempting reconnect.") return False - log.info("Sender connection error (%r). Shutting down.", shutdown) - error = ConnectError(str(shutdown), shutdown) - self.close(exception=error) - raise error except Exception as e: log.info("Unexpected error occurred (%r). 
Shutting down.", e) - error = EventHubError("Sender Reconnect failed: {}".format(e)) + error = EventHubError("EventSender Reconnect failed: {}".format(e)) self.close(exception=error) raise error - def reconnect(self): - """If the Sender was disconnected from the service with - a retryable error - attempt to reconnect.""" - while not self._reconnect(): - time.sleep(self.reconnect_backoff) + def _reconnect(self): + return self._build_connection(is_reconnect=True) def close(self, exception=None): """ @@ -240,64 +213,107 @@ def close(self, exception=None): self.error = EventHubError("This send handler is now closed.") self._handler.close() - def _send_event_data(self, event_data): + def _send_event_data(self): self._open() - - try: - self._handler.send_message(event_data.message) - if self._outcome != MessageSendResult.Ok: - raise Sender._error(self._outcome, self._condition) - except errors.MessageException as failed: - error = EventDataError(str(failed), failed) - self.close(exception=error) - raise error - except (errors.TokenExpired, errors.AuthenticationException): - log.info("Sender disconnected due to token error. Attempting reconnect.") - self.reconnect() - except (errors.LinkDetach, errors.ConnectionClose) as shutdown: - if shutdown.action.retry and self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - self.reconnect() - else: - log.info("Sender detached. 
Shutting down.") - error = ConnectError(str(shutdown), shutdown) + max_retries = self.client.config.max_retries + connecting_count = 0 + while True: + connecting_count += 1 + try: + if self.unsent_events: + self._handler.queue_message(*self.unsent_events) + self._handler.wait() + self.unsent_events = self._handler.pending_messages + if self._outcome != constants.MessageSendResult.Ok: + EventSender._error(self._outcome, self._condition) + return + except (errors.MessageAccepted, + errors.MessageAlreadySettled, + errors.MessageModified, + errors.MessageRejected, + errors.MessageReleased, + errors.MessageContentTooLarge) as msg_error: + raise EventDataError(str(msg_error), msg_error) + except errors.MessageException as failed: + log.info("Send event data error (%r)", failed) + error = EventDataSendError(str(failed), failed) self.close(exception=error) raise error - except errors.MessageHandlerError as shutdown: - if self.auto_reconnect: - log.info("Sender detached. Attempting reconnect.") - self.reconnect() - else: - log.info("Sender detached. Shutting down.") - error = ConnectError(str(shutdown), shutdown) + except errors.AuthenticationException as auth_error: + if connecting_count < max_retries: + log.info("EventSender disconnected due to token error. Attempting reconnect.") + self._reconnect() + else: + log.info("EventSender authentication failed. Shutting down.") + error = AuthenticationError(str(auth_error), auth_error) + self.close(auth_error) + raise error + except (errors.LinkDetach, errors.ConnectionClose) as shutdown: + if shutdown.action.retry: + log.info("EventSender detached. Attempting reconnect.") + self._reconnect() + else: + log.info("EventSender detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + self.close(exception=error) + raise error + except errors.MessageHandlerError as shutdown: + if connecting_count < max_retries: + log.info("EventSender detached. 
Attempting reconnect.") + self._reconnect() + else: + log.info("EventSender detached. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + self.close(error) + raise error + except errors.AMQPConnectionError as shutdown: + if connecting_count < max_retries: + log.info("EventSender connection lost. Attempting reconnect.") + self._reconnect() + else: + log.info("EventSender connection lost. Shutting down.") + error = ConnectionLostError(str(shutdown), shutdown) + self.close(error) + raise error + except compat.TimeoutException as shutdown: + if connecting_count < max_retries: + log.info("EventSender timed out sending event data. Attempting reconnect.") + self._reconnect() + else: + log.info("EventSender timed out. Shutting down.") + self.close(shutdown) + raise TimeoutError(str(shutdown), shutdown) + except Exception as e: + log.info("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Send failed: {}".format(e)) self.close(exception=error) raise error - except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Send failed: {}".format(e)) - self.close(exception=error) - raise error - else: - return self._outcome + + def _check_closed(self): + if self.error: + raise EventHubError("This sender has been closed. Please create a new sender to send event data.", self.error) @staticmethod - def _set_batching_label(event_datas, batching_label): + def _set_partition_key(event_datas, partition_key): ed_iter = iter(event_datas) for ed in ed_iter: - ed._batching_label = batching_label + ed._set_partition_key(partition_key) yield ed - def send(self, event_data, batching_label=None): + def send(self, event_data, partition_key=None): """ Sends an event data and blocks until acknowledgement is received or operation times out. :param event_data: The event to be sent. 
:type event_data: ~azure.eventhub.common.EventData + :param partition_key: With the given partition_key, event data will land to + a particular partition of the Event Hub decided by the service. + :type batching_label: str :raises: ~azure.eventhub.common.EventHubError if the message fails to send. - :return: The outcome of the message send. - :rtype: ~uamqp.constants.MessageSendResult + :return: None + :rtype: None Example: .. literalinclude:: ../examples/test_examples_eventhub.py @@ -308,18 +324,18 @@ def send(self, event_data, batching_label=None): :caption: Sends an event data and blocks until acknowledgement is received or operation times out. """ - if self.error: - raise self.error + self._check_closed() if isinstance(event_data, EventData): - if batching_label: - event_data._batching_label = batching_label + if partition_key: + event_data._set_partition_key(partition_key) wrapper_event_data = event_data else: wrapper_event_data = _BatchSendEventData( - self._set_batching_label(event_data, batching_label), - batching_label=batching_label) if batching_label else _BatchSendEventData(event_data) + self._set_partition_key(event_data, partition_key), + partition_key=partition_key) if partition_key else _BatchSendEventData(event_data) wrapper_event_data.message.on_send_complete = self._on_outcome - self._send_event_data(wrapper_event_data) + self.unsent_events = [wrapper_event_data.message] + self._send_event_data() def _on_outcome(self, outcome, condition): """ @@ -327,10 +343,13 @@ def _on_outcome(self, outcome, condition): :param outcome: The outcome of the message delivery - success or failure. :type outcome: ~uamqp.constants.MessageSendResult + :param condition: Detail information of the outcome. 
+ """ self._outcome = outcome self._condition = condition @staticmethod def _error(outcome, condition): - return None if outcome == MessageSendResult.Ok else EventHubError(outcome, condition) + if outcome != MessageSendResult.Ok: + raise condition diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py index d2c649f9a0a6..84d6a9ae84fe 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py @@ -5,7 +5,7 @@ import logging import asyncio -from azure.eventhub import EventPosition +from azure.eventhub import EventPosition, EventHubSharedKeyCredential from azure.eventhub.aio import EventHubClient from azure.eventprocessorhost.partition_pump import PartitionPump @@ -49,7 +49,6 @@ async def on_open_async(self): if self.pump_status == "Opening": loop = asyncio.get_event_loop() self.set_pump_status("Running") - await self.eh_client.run_async() self.running = loop.create_task(self.partition_receiver.run()) if self.pump_status in ["OpenFailed", "Errored"]: @@ -65,17 +64,19 @@ async def open_clients_async(self): """ await self.partition_context.get_initial_offset_async() # Create event hub client and receive handler and set options + hostname = "{}.{}".format(self.host.eh_config.sb_name, self.host.eh_config.namespace_suffix) + event_hub_path = self.host.eh_config.eh_name + shared_key_cred = EventHubSharedKeyCredential(self.host.eh_config.policy, self.host.eh_config.sas_key) + self.eh_client = EventHubClient( - self.host.eh_config.client_address, - debug=self.host.eph_options.debug_trace, + hostname, event_hub_path, shared_key_cred, + network_tracing=self.host.eph_options.debug_trace, http_proxy=self.host.eph_options.http_proxy) - self.partition_receive_handler = self.eh_client.add_async_receiver( - self.partition_context.consumer_group_name, - 
self.partition_context.partition_id, - EventPosition(self.partition_context.offset), + self.partition_receive_handler = self.eh_client.create_receiver( + partition_id=self.partition_context.partition_id, + consumer_group=self.partition_context.consumer_group_name, + event_position=EventPosition(self.partition_context.offset), prefetch=self.host.eph_options.prefetch_count, - keep_alive=self.host.eph_options.keep_alive_interval, - auto_reconnect=self.host.eph_options.auto_reconnect_on_error, loop=self.loop) self.partition_receiver = PartitionReceiver(self) @@ -85,7 +86,7 @@ async def clean_up_clients_async(self): """ if self.partition_receiver: if self.eh_client: - await self.eh_client.stop_async() + await self.partition_receive_handler.close() self.partition_receiver = None self.partition_receive_handler = None self.eh_client = None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py index d532846a5476..41ffe9d043bd 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/partition_manager.py @@ -9,6 +9,7 @@ from collections import Counter from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventHubSharedKeyCredential from azure.eventprocessorhost.eh_partition_pump import EventHubPartitionPump from azure.eventprocessorhost.cancellation_token import CancellationToken @@ -36,18 +37,20 @@ async def get_partition_ids_async(self): :rtype: list[str] """ if not self.partition_ids: + hostname = "{}.{}".format(self.host.eh_config.sb_name, self.host.eh_config.namespace_suffix) + event_hub_path = self.host.eh_config.eh_name + shared_key_cred = EventHubSharedKeyCredential(self.host.eh_config.policy, self.host.eh_config.sas_key) + + eh_client = EventHubClient( + hostname, event_hub_path, shared_key_cred, + network_tracing=self.host.eph_options.debug_trace, 
+ # http_proxy=self.host.eph_options.http_proxy, + ) try: - eh_client = EventHubClient( - self.host.eh_config.client_address, - debug=self.host.eph_options.debug_trace, - http_proxy=self.host.eph_options.http_proxy) - try: - eh_info = await eh_client.get_eventhub_info_async() - self.partition_ids = eh_info['partition_ids'] - except Exception as err: # pylint: disable=broad-except - raise Exception("Failed to get partition ids", repr(err)) - finally: - await eh_client.stop_async() + eh_info = await eh_client.get_properties() + self.partition_ids = eh_info['partition_ids'] + except Exception as err: # pylint: disable=broad-except + raise Exception("Failed to get partition ids", repr(err)) return self.partition_ids async def start_async(self): diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index 68a211917f4c..a23dc86be6a8 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -29,7 +29,7 @@ from azure.eventprocessorhost.partition_pump import PartitionPump from azure.eventprocessorhost.partition_manager import PartitionManager -from azure.eventhub import EventHubClient, Receiver, EventPosition +from azure.eventhub import EventHubClient, EventReceiver, EventPosition def get_logger(filename, level=logging.INFO): @@ -162,16 +162,23 @@ def device_id(): pytest.skip("No Iothub device ID found.") +@pytest.fixture() +def aad_credential(): + try: + return os.environ['AAD_CLIENT_ID'], os.environ['AAD_SECRET'], os.environ['AAD_TENANT_ID'] + except KeyError: + pytest.skip('No Azure Active Directory credential found') + + @pytest.fixture() def connstr_receivers(connection_str): - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) partitions = client.get_partition_ids() receivers = [] for p in partitions: - #receiver = client.create_receiver(partition_id=p, prefetch=500, 
event_position=EventPosition("@latest")) receiver = client.create_receiver(partition_id=p, prefetch=500, event_position=EventPosition("-1")) + receiver._open() receivers.append(receiver) - receiver.receive(timeout=1) yield connection_str, receivers for r in receivers: @@ -180,7 +187,7 @@ def connstr_receivers(connection_str): @pytest.fixture() def connstr_senders(connection_str): - client = EventHubClient.from_connection_string(connection_str, debug=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=True) partitions = client.get_partition_ids() senders = [] diff --git a/sdk/eventhub/azure-eventhubs/dev_requirements.txt b/sdk/eventhub/azure-eventhubs/dev_requirements.txt index bd3d6e3bb6e0..fa716ae38ebe 100644 --- a/sdk/eventhub/azure-eventhubs/dev_requirements.txt +++ b/sdk/eventhub/azure-eventhubs/dev_requirements.txt @@ -1,4 +1,5 @@ -e ../../servicebus/azure-servicebus +-e ../../core/azure-core pytest>=3.4.1 pytest-asyncio>=0.8.0; python_version > '3.4' docutils>=0.14 diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py new file mode 100644 index 000000000000..8149c1c2a51d --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +An example to show iterator receiver. 
+""" + +import os +import time +import logging +import asyncio + +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventPosition, EventHubSharedKeyCredential, EventData + +import examples +logger = examples.get_logger(logging.INFO) + + +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') + +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + +EVENT_POSITION = EventPosition.first_available_event() + + +async def iter_receiver(receiver): + async with receiver: + async for item in receiver: + print(item.body_as_str(), item.offset.value, receiver.name) + + +async def main(): + if not HOSTNAME: + raise ValueError("No EventHubs URL supplied.") + client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EVENT_POSITION) + await iter_receiver(receiver) + +if __name__ == '__main__': + asyncio.run(main()) + diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py new file mode 100644 index 000000000000..63ad3428fb40 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +An example to show running concurrent receivers. 
+""" + +import os +import time +import logging +import asyncio + +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventPosition, EventHubSharedKeyCredential + +import examples +logger = examples.get_logger(logging.INFO) + + +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') + +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + +EVENT_POSITION = EventPosition.first_available_event() + + +async def pump(client, partition): + receiver = client.create_receiver(partition_id=partition, event_position=EVENT_POSITION, prefetch=5) + async with receiver: + total = 0 + start_time = time.time() + for event_data in await receiver.receive(timeout=10): + last_offset = event_data.offset + last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset.value, last_sn)) + total += 1 + end_time = time.time() + run_time = end_time - start_time + print("Received {} messages in {} seconds".format(total, run_time)) + +try: + if not HOSTNAME: + raise ValueError("No EventHubs URL supplied.") + + loop = asyncio.get_event_loop() + client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) + tasks = [ + asyncio.ensure_future(pump(client, "0")), + asyncio.ensure_future(pump(client, "1"))] + loop.run_until_complete(asyncio.wait(tasks)) + loop.close() + +except KeyboardInterrupt: + pass diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py new file mode 100644 index 000000000000..200a2be8ad98 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +""" +An example to show sending events asynchronously to an Event Hub with partition keys. +""" + +# pylint: disable=C0111 + +import logging +import time +import asyncio +import os + +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData, EventHubSharedKeyCredential + +import examples +logger = examples.get_logger(logging.INFO) + +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') + +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + + +async def run(client): + sender = client.create_sender() + await send(sender, 4) + + +async def send(sender, count): + async with sender: + for i in range(count): + logger.info("Sending message: {}".format(i)) + data = EventData(str(i)) + await sender.send(data) + +try: + if not HOSTNAME: + raise ValueError("No EventHubs URL supplied.") + + loop = asyncio.get_event_loop() + client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) + tasks = asyncio.gather( + run(client), + run(client)) + start_time = time.time() + loop.run_until_complete(tasks) + end_time = time.time() + run_time = end_time - start_time + logger.info("Runtime: {} seconds".format(run_time)) + loop.close() + +except KeyboardInterrupt: + pass diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py index 7f18fc97b756..4a36faa88947 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py +++ 
b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py @@ -11,169 +11,94 @@ import logging import asyncio -from azure.eventhub import EventHubError +from azure.eventhub import EventHubError, EventData @pytest.mark.asyncio async def test_example_eventhub_async_send_and_receive(live_eventhub_config): # [START create_eventhub_client_async] - from azure.eventhub import EventHubClientAsync + from azure.eventhub.aio import EventHubClient import os connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( os.environ['EVENT_HUB_HOSTNAME'], os.environ['EVENT_HUB_SAS_POLICY'], os.environ['EVENT_HUB_SAS_KEY'], os.environ['EVENT_HUB_NAME']) - client = EventHubClientAsync.from_connection_string(connection_str) + client = EventHubClient.from_connection_string(connection_str) # [END create_eventhub_client_async] - from azure.eventhub import EventData, Offset + from azure.eventhub import EventData, EventPosition # [START create_eventhub_client_async_sender] - client = EventHubClientAsync.from_connection_string(connection_str) - # Add a async sender to the async client object. - sender = client.add_async_sender(partition="0") + client = EventHubClient.from_connection_string(connection_str) + # Create an async sender. + sender = client.create_sender(partition_id="0") # [END create_eventhub_client_async_sender] # [START create_eventhub_client_async_receiver] - client = EventHubClientAsync.from_connection_string(connection_str) - # Add a async receiver to the async client object. - receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + client = EventHubClient.from_connection_string(connection_str) + # Create an async receiver. + receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) + # Create an exclusive async receiver. 
+ receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), exclusive_receiver_priority=1) # [END create_eventhub_client_async_receiver] - # [START create_eventhub_client_async_epoch_receiver] - client = EventHubClientAsync.from_connection_string(connection_str) - # Add a async receiver to the async client object. - epoch_receiver = client.add_async_epoch_receiver(consumer_group="$default", partition="0", epoch=42) - # [END create_eventhub_client_async_epoch_receiver] + client = EventHubClient.from_connection_string(connection_str) + sender = client.create_sender(partition_id="0") + receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) - # [START eventhub_client_run_async] - client = EventHubClientAsync.from_connection_string(connection_str) - # Add AsyncSenders/AsyncReceivers - try: - # Opens the connection and starts running all AsyncSender/AsyncReceiver clients. - await client.run_async() - # Start sending and receiving - except: - raise - finally: - await client.stop_async() - # [END eventhub_client_run_async] + await receiver.receive(timeout=1) - - client = EventHubClientAsync.from_connection_string(connection_str) - sender = client.add_async_sender(partition="0") - receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - try: - # Opens the connection and starts running all AsyncSender/AsyncReceiver clients. 
- await client.run_async() - - # [START eventhub_client_async_send] + # [START eventhub_client_async_send] + async with sender: event_data = EventData(b"A single event") await sender.send(event_data) - # [END eventhub_client_async_send] - time.sleep(1) - # [START eventhub_client_async_receive] - logger = logging.getLogger("azure.eventhub") + # [END eventhub_client_async_send] + + await asyncio.sleep(1) + + # [START eventhub_client_async_receive] + logger = logging.getLogger("azure.eventhub") + async with receiver: received = await receiver.receive(timeout=5) for event_data in received: logger.info("Message received:{}".format(event_data.body_as_str())) - # [END eventhub_client_async_receive] + # [END eventhub_client_async_receive] assert len(received) == 1 assert received[0].body_as_str() == "A single event" assert list(received[-1].body)[0] == b"A single event" - except: - raise - finally: - await client.stop_async() - - # [START eventhub_client_async_stop] - client = EventHubClientAsync.from_connection_string(connection_str) - # Add AsyncSenders/AsyncReceivers - try: - # Opens the connection and starts running all AsyncSender/AsyncReceiver clients. - await client.run_async() - # Start sending and receiving - except: - raise - finally: - await client.stop_async() - # [END eventhub_client_async_stop] @pytest.mark.asyncio async def test_example_eventhub_async_sender_ops(live_eventhub_config, connection_str): - import os - # [START create_eventhub_client_async_sender_instance] - from azure.eventhub import EventHubClientAsync - - client = EventHubClientAsync.from_connection_string(connection_str) - sender = client.add_async_sender(partition="0") - # [END create_eventhub_client_async_sender_instance] - - # [START eventhub_client_async_sender_open] - client = EventHubClientAsync.from_connection_string(connection_str) - sender = client.add_async_sender(partition="0") - try: - # Open the Async Sender using the supplied conneciton. 
- await sender.open_async() - # Start sending - except: - raise - finally: - # Close down the send handler. - await sender.close_async() - # [END eventhub_client_async_sender_open] + from azure.eventhub.aio import EventHubClient + from azure.eventhub import EventData # [START eventhub_client_async_sender_close] - client = EventHubClientAsync.from_connection_string(connection_str) - sender = client.add_async_sender(partition="0") + client = EventHubClient.from_connection_string(connection_str) + sender = client.create_sender(partition_id="0") try: - # Open the Async Sender using the supplied conneciton. - await sender.open_async() - # Start sending - except: - raise + await sender.send(EventData(b"A single event")) finally: # Close down the send handler. - await sender.close_async() + await sender.close() # [END eventhub_client_async_sender_close] @pytest.mark.asyncio async def test_example_eventhub_async_receiver_ops(live_eventhub_config, connection_str): - import os - # [START create_eventhub_client_async_receiver_instance] - from azure.eventhub import EventHubClientAsync, Offset - - client = EventHubClientAsync.from_connection_string(connection_str) - receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - # [END create_eventhub_client_async_receiver_instance] - - # [START eventhub_client_async_receiver_open] - client = EventHubClientAsync.from_connection_string(connection_str) - receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - try: - # Open the Async Receiver using the supplied conneciton. - await receiver.open_async() - # Start receiving - except: - raise - finally: - # Close down the receive handler. 
- await receiver.close_async() - # [END eventhub_client_async_receiver_open] + from azure.eventhub.aio import EventHubClient + from azure.eventhub import EventPosition # [START eventhub_client_async_receiver_close] - client = EventHubClientAsync.from_connection_string(connection_str) - receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + client = EventHubClient.from_connection_string(connection_str) + receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) try: - # Open the Async Receiver using the supplied conneciton. - await receiver.open_async() - # Start receiving + # Open and receive + await receiver.receive(timeout=1) except: raise finally: # Close down the receive handler. - await receiver.close_async() - # [END eventhub_client_async_receiver_close] \ No newline at end of file + await receiver.close() + # [END eventhub_client_async_receiver_close] diff --git a/sdk/eventhub/azure-eventhubs/examples/batch_send.py b/sdk/eventhub/azure-eventhubs/examples/batch_send.py index 7cbf6259d661..bf80907a655e 100644 --- a/sdk/eventhub/azure-eventhubs/examples/batch_send.py +++ b/sdk/eventhub/azure-eventhubs/examples/batch_send.py @@ -1,54 +1,49 @@ #!/usr/bin/env python +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + """ An example to show batch sending events to an Event Hub. 
""" # pylint: disable=C0111 -import sys import logging -import datetime import time import os -from azure.eventhub import EventHubClient, Sender, EventData +from azure.eventhub import EventData, EventHubClient, EventHubSharedKeyCredential + import examples logger = examples.get_logger(logging.INFO) -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') -# SAS policy and key are not required if they are encoded in the URL USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -def data_generator(): - for i in range(1500): - logger.info("Yielding message {}".format(i)) - yield b"Hello world" - - try: - if not ADDRESS: + if not HOSTNAME: raise ValueError("No EventHubs URL supplied.") - client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) - sender = client.add_sender(partition="1") - client.run() - try: + client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) + sender = client.create_sender(partition_id="1") + + event_list = [] + for i in range(1500): + event_list.append('Hello World') + + with sender: start_time = time.time() - data = EventData(batch=data_generator()) + data = EventData(body=event_list) sender.send(data) - except: - raise - finally: end_time = time.time() - client.stop() run_time = end_time - start_time logger.info("Runtime: {} seconds".format(run_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/batch_transfer.py b/sdk/eventhub/azure-eventhubs/examples/batch_transfer.py deleted file mode 100644 index 676ac6c3e2ea..000000000000 --- a/sdk/eventhub/azure-eventhubs/examples/batch_transfer.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -""" -An 
example to show batch sending events to an Event Hub. -""" - -# pylint: disable=C0111 - -import sys -import logging -import datetime -import time -import os - -from azure.eventhub import EventHubClient, Sender, EventData - -import examples -logger = examples.get_logger(logging.INFO) - -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') - -# SAS policy and key are not required if they are encoded in the URL -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') - - -def callback(outcome, condition): - logger.info("Message sent. Outcome: {}, Condition: {}".format( - outcome, condition)) - - -def data_generator(): - for i in range(1500): - logger.info("Yielding message {}".format(i)) - yield b"Hello world" - - -try: - if not ADDRESS: - raise ValueError("No EventHubs URL supplied.") - - client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) - sender = client.add_sender() - client.run() - try: - start_time = time.time() - data = EventData(batch=data_generator()) - sender.transfer(data, callback=callback) - sender.wait() - except: - raise - finally: - end_time = time.time() - client.stop() - run_time = end_time - start_time - logger.info("Runtime: {} seconds".format(run_time)) - -except KeyboardInterrupt: - pass diff --git a/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py new file mode 100644 index 000000000000..39697379f4d2 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +""" +An example to show authentication using aad credentials +""" + +import os +import time +import logging + +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData +from azure.identity import ClientSecretCredential + +import examples +logger = examples.get_logger(logging.INFO) + + +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') + +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + +CLIENT_ID = os.environ.get('AAD_CLIENT_ID') +SECRET = os.environ.get('AAD_SECRET') +TENANT_ID = os.environ.get('AAD_TENANT_ID') + + +credential = ClientSecretCredential(client_id=CLIENT_ID, secret=SECRET, tenant_id=TENANT_ID) +client = EventHubClient(host=HOSTNAME, + event_hub_path=EVENT_HUB, + credential=credential) +try: + sender = client.create_sender(partition_id='0') + + with sender: + event = EventData(body='A single message') + sender.send(event) + +except KeyboardInterrupt: + pass +except Exception as e: + print(e) diff --git a/sdk/eventhub/azure-eventhubs/examples/eph.py b/sdk/eventhub/azure-eventhubs/examples/eph.py index 39f0fbba4179..66e4e6aa866f 100644 --- a/sdk/eventhub/azure-eventhubs/examples/eph.py +++ b/sdk/eventhub/azure-eventhubs/examples/eph.py @@ -109,7 +109,7 @@ async def wait_and_close(host): EventProcessor, eh_config, storage_manager, - ep_params=["param1","param2"], + ep_params=["param1", "param2"], eph_options=eh_options, loop=loop) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index acf88117844e..7961abef5c0a 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + # 
-------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. @@ -6,23 +8,21 @@ """ An example to show receiving events from an IoT Hub partition. """ -from azure import eventhub -from azure.eventhub import EventData, EventHubClient, Offset - import os import logging -logger = logging.getLogger('azure.eventhub') -iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] +from azure.eventhub import EventHubClient -client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) -receiver = client.add_receiver("$default", "0", operation='/messages/events') -try: - client.run() - eh_info = client.get_eventhub_info() - print(eh_info) +logger = logging.getLogger('azure.eventhub') + +iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] + +client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) +receiver = client.create_receiver(partition_id="0", operation='/messages/events') +with receiver: received = receiver.receive(timeout=5) print(received) -finally: - client.stop() + + eh_info = client.get_properties() + print(eh_info) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py new file mode 100644 index 000000000000..ab4fcb2adec6 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +""" +An example to show receiving events from an IoT Hub partition. +""" +import os +import logging + +from azure.eventhub import EventData, EventHubClient + + +logger = logging.getLogger('azure.eventhub') + +iot_device_id = os.environ['IOTHUB_DEVICE'] +iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] + +client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) +try: + sender = client.create_sender(operation='/messages/devicebound') + with sender: + sender.send(EventData(b"A single event", to_device=iot_device_id)) + +except KeyboardInterrupt: + pass diff --git a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py new file mode 100644 index 000000000000..a8dd0a4b4400 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +from threading import Thread +import os +import time +import logging + +from azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential, EventData + +import examples +logger = examples.get_logger(logging.INFO) + + +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') + +USER = os.environ.get('EVENT_HUB_SAS_POLICY') +KEY = os.environ.get('EVENT_HUB_SAS_KEY') + +EVENT_POSITION = EventPosition.first_available_event() + + +class PartitionReceiverThread(Thread): + def __init__(self, receiver): + Thread.__init__(self) + self.receiver = receiver + + def run(self): + for item in self.receiver: + print(item) + + +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=True) +receiver = client.create_receiver(partition_id="0", event_position=EVENT_POSITION) +with receiver: + thread = PartitionReceiverThread(receiver) + thread.start() + thread.join(2) # stop after 2 seconds diff --git a/sdk/eventhub/azure-eventhubs/examples/proxy.py b/sdk/eventhub/azure-eventhubs/examples/proxy.py index b4a2d51b0411..0417fe32d0f4 100644 --- a/sdk/eventhub/azure-eventhubs/examples/proxy.py +++ b/sdk/eventhub/azure-eventhubs/examples/proxy.py @@ -1,27 +1,30 @@ #!/usr/bin/env python +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + """ An example to show sending and receiving events behind a proxy """ import os import logging -from azure.eventhub import EventHubClient, EventPosition, EventData +from azure.eventhub import EventHubClient, EventPosition, EventData, EventHubSharedKeyCredential import examples logger = examples.get_logger(logging.INFO) -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') +# Hostname can be .servicebus.windows.net" +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') -# SAS policy and key are not required if they are encoded in the URL USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -CONSUMER_GROUP = "$default" -EVENT_POSITION = EventPosition.first_available() + +EVENT_POSITION = EventPosition.first_available_event() PARTITION = "0" HTTP_PROXY = { 'proxy_hostname': '127.0.0.1', # proxy hostname @@ -31,27 +34,28 @@ } -if not ADDRESS: +if not HOSTNAME: raise ValueError("No EventHubs URL supplied.") -client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY, http_proxy=HTTP_PROXY) -sender = client.create_sender(partition=PARTITION) -receiver = client.create_receiver(consumer_group=CONSUMER_GROUP, partition=PARTITION, event_position=EVENT_POSITION) +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False, http_proxy=HTTP_PROXY) try: + sender = client.create_sender(partition_id=PARTITION) + receiver = client.create_receiver(partition_id=PARTITION, event_position=EVENT_POSITION) + + receiver.receive(timeout=1) + event_list = [] for i in range(20): event_list.append(EventData("Event Number {}".format(i))) print('Start sending events behind a proxy.') - with 
sender: - sender.send(list) + sender.send(event_list) print('Start receiving events behind a proxy.') - with receiver: - received = receiver.receive(max_batch_size=50, timeout=5) - -except KeyboardInterrupt: - pass + received = receiver.receive(max_batch_size=50, timeout=5) +finally: + sender.close() + receiver.close() diff --git a/sdk/eventhub/azure-eventhubs/examples/recv.py b/sdk/eventhub/azure-eventhubs/examples/recv.py index f43d03be9ce5..0e85dcb5fb39 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv.py @@ -9,51 +9,46 @@ An example to show receiving events from an Event Hub partition. """ import os -import sys import logging import time -from azure.eventhub import EventHubClient, Receiver, Offset +from azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential import examples logger = examples.get_logger(logging.INFO) -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') -# SAS policy and key are not required if they are encoded in the URL USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -CONSUMER_GROUP = "$default" -OFFSET = Offset("-1") + +EVENT_POSITION = EventPosition.first_available_event() PARTITION = "0" total = 0 last_sn = -1 last_offset = "-1" -client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) + try: - receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000, offset=OFFSET) - client.run() - start_time = time.time() - batch = receiver.receive(timeout=5000) - while batch: - for event_data in batch: - 
last_offset = event_data.offset - last_sn = event_data.sequence_number - print("Received: {}, {}".format(last_offset.value, last_sn)) - print(event_data.body_as_str()) - total += 1 + receiver = client.create_receiver(partition_id=PARTITION, prefetch=5000, event_position=EVENT_POSITION) + with receiver: + start_time = time.time() batch = receiver.receive(timeout=5000) - - end_time = time.time() - client.stop() - run_time = end_time - start_time - print("Received {} messages in {} seconds".format(total, run_time)) + while batch: + for event_data in batch: + last_offset = event_data.offset + last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset.value, last_sn)) + print(event_data.body_as_str()) + total += 1 + batch = receiver.receive(timeout=5000) + + end_time = time.time() + run_time = end_time - start_time + print("Received {} messages in {} seconds".format(total, run_time)) except KeyboardInterrupt: pass -finally: - client.stop() \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_async.py b/sdk/eventhub/azure-eventhubs/examples/recv_async.py deleted file mode 100644 index 04d922649b3c..000000000000 --- a/sdk/eventhub/azure-eventhubs/examples/recv_async.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - -""" -An example to show running concurrent receivers. 
-""" - -import os -import sys -import time -import logging -import asyncio -from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver - -import examples -logger = examples.get_logger(logging.INFO) - -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') - -# SAS policy and key are not required if they are encoded in the URL -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') -CONSUMER_GROUP = "$default" -OFFSET = Offset("-1") - - -async def pump(client, partition): - receiver = client.add_async_receiver(CONSUMER_GROUP, partition, OFFSET, prefetch=5) - await client.run_async() - total = 0 - start_time = time.time() - for event_data in await receiver.receive(timeout=10): - last_offset = event_data.offset - last_sn = event_data.sequence_number - print("Received: {}, {}".format(last_offset.value, last_sn)) - total += 1 - end_time = time.time() - run_time = end_time - start_time - print("Received {} messages in {} seconds".format(total, run_time)) - -try: - if not ADDRESS: - raise ValueError("No EventHubs URL supplied.") - - loop = asyncio.get_event_loop() - client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY) - tasks = [ - asyncio.ensure_future(pump(client, "0")), - asyncio.ensure_future(pump(client, "1"))] - loop.run_until_complete(asyncio.wait(tasks)) - loop.run_until_complete(client.stop_async()) - loop.close() - -except KeyboardInterrupt: - pass diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py index 9478f51feb21..0b270454828f 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py @@ -11,45 +11,41 @@ """ import os -import sys import logging -from azure.eventhub import EventHubClient, Receiver, Offset + +from 
azure.eventhub import EventHubClient, EventPosition, EventHubSharedKeyCredential import examples logger = examples.get_logger(logging.INFO) -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') -# SAS policy and key are not required if they are encoded in the URL USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -CONSUMER_GROUP = "$default" -OFFSET = Offset("-1") + +EVENT_POSITION = EventPosition.first_available_event() PARTITION = "0" total = 0 last_sn = -1 last_offset = "-1" -client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) +client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) + try: - receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=100, offset=OFFSET) - client.run() - batched_events = receiver.receive(max_batch_size=10) - for event_data in batched_events: - last_offset = event_data.offset.value - last_sn = event_data.sequence_number - total += 1 - print("Partition {}, Received {}, sn={} offset={}".format( - PARTITION, - total, - last_sn, - last_offset)) + receiver = client.create_receiver(partition_id=PARTITION, prefetch=100, event_position=EVENT_POSITION) + with receiver: + batched_events = receiver.receive(max_batch_size=10) + for event_data in batched_events: + last_offset = event_data.offset.value + last_sn = event_data.sequence_number + total += 1 + print("Partition {}, Received {}, sn={} offset={}".format( + PARTITION, + total, + last_sn, + last_offset)) except KeyboardInterrupt: pass -finally: - client.stop() \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py 
b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py index f9f291ed6bc3..3f82202dbd5b 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py @@ -10,48 +10,46 @@ """ import os -import sys import time import logging import asyncio -from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver + +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventHubSharedKeyCredential import examples logger = examples.get_logger(logging.INFO) -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') -# SAS policy and key are not required if they are encoded in the URL USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -CONSUMER_GROUP = "$default" -EPOCH = 42 + +EXCLUSIVE_RECEIVER_PRIORITY = 42 PARTITION = "0" -async def pump(client, epoch): - receiver = client.add_async_epoch_receiver(CONSUMER_GROUP, PARTITION, epoch=epoch) - await client.run_async() - total = 0 - start_time = time.time() - for event_data in await receiver.receive(timeout=5): - last_offset = event_data.offset - last_sn = event_data.sequence_number - total += 1 - end_time = time.time() - run_time = end_time - start_time - await client.stop_async() - print("Received {} messages in {} seconds".format(total, run_time)) +async def pump(client, exclusive_receiver_priority): + receiver = client.create_receiver(partition_id=PARTITION, exclusive_receiver_priority=exclusive_receiver_priority) + async with receiver: + total = 0 + start_time = time.time() + for event_data in await receiver.receive(timeout=5): + last_offset = event_data.offset + last_sn = event_data.sequence_number + total += 1 + end_time = time.time() + run_time = end_time 
- start_time + print("Received {} messages in {} seconds".format(total, run_time)) try: - if not ADDRESS: + if not HOSTNAME: raise ValueError("No EventHubs URL supplied.") loop = asyncio.get_event_loop() - client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY) + client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) loop.run_until_complete(pump(client, 20)) loop.close() diff --git a/sdk/eventhub/azure-eventhubs/examples/send.py b/sdk/eventhub/azure-eventhubs/examples/send.py index 6881b0d578ee..316a6e2739a5 100644 --- a/sdk/eventhub/azure-eventhubs/examples/send.py +++ b/sdk/eventhub/azure-eventhubs/examples/send.py @@ -1,49 +1,52 @@ #!/usr/bin/env python +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + """ An example to show sending events to an Event Hub partition. 
""" # pylint: disable=C0111 -import sys import logging -import datetime import time import os -from azure.eventhub import EventHubClient, Sender, EventData +from azure.eventhub import EventHubClient, EventData, EventHubSharedKeyCredential import examples logger = examples.get_logger(logging.INFO) -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') +HOSTNAME = os.environ.get('EVENT_HUB_HOSTNAME') # .servicebus.windows.net +EVENT_HUB = os.environ.get('EVENT_HUB_NAME') -# SAS policy and key are not required if they are encoded in the URL USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') try: - if not ADDRESS: + if not HOSTNAME: raise ValueError("No EventHubs URL supplied.") - client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) - sender = client.add_sender(partition="0") - client.run() + client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), + network_tracing=False) + sender = client.create_sender(partition_id="0") + + ed = EventData("msg") + try: start_time = time.time() - for i in range(100): - logger.info("Sending message: {}".format(i)) - sender.send(EventData(str(i))) + with sender: + for i in range(100): + logger.info("Sending message: {}".format(i)) + sender.send(ed) except: raise finally: end_time = time.time() - client.stop() run_time = end_time - start_time logger.info("Runtime: {} seconds".format(run_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/send_async.py b/sdk/eventhub/azure-eventhubs/examples/send_async.py deleted file mode 100644 index 248fdcf853b9..000000000000 --- a/sdk/eventhub/azure-eventhubs/examples/send_async.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env python - -""" -An example to show sending events asynchronously to an Event Hub with partition keys. 
-""" - -# pylint: disable=C0111 - -import sys -import logging -import time -import asyncio -import os - -from azure.eventhub import EventData, EventHubClientAsync, AsyncSender - -import examples -logger = examples.get_logger(logging.INFO) - -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') - -# SAS policy and key are not required if they are encoded in the URL -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') - - -async def run(client): - sender = client.add_async_sender() - await client.run_async() - await send(sender, 4) - - -async def send(snd, count): - for i in range(count): - logger.info("Sending message: {}".format(i)) - data = EventData(str(i)) - data.partition_key = b'SamplePartitionKey' - await snd.send(data) - -try: - if not ADDRESS: - raise ValueError("No EventHubs URL supplied.") - - loop = asyncio.get_event_loop() - client = EventHubClientAsync(ADDRESS, debug=True, username=USER, password=KEY) - tasks = asyncio.gather( - run(client), - run(client)) - start_time = time.time() - loop.run_until_complete(tasks) - loop.run_until_complete(client.stop_async()) - end_time = time.time() - run_time = end_time - start_time - logger.info("Runtime: {} seconds".format(run_time)) - loop.close() - -except KeyboardInterrupt: - pass diff --git a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py index ffd53e02fd3e..fd1c1ba1773e 100644 --- a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py +++ b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py @@ -16,34 +16,22 @@ def create_eventhub_client(live_eventhub_config): # [START create_eventhub_client] import os - from azure.eventhub import EventHubClient + from azure.eventhub import EventHubClient, EventHubSharedKeyCredential - address = 
os.environ['EVENT_HUB_ADDRESS'] + host = os.environ['EVENT_HUB_HOSTNAME'] + event_hub_path = os.environ['EVENT_HUB_NAME'] shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY'] shared_access_key = os.environ['EVENT_HUB_SAS_KEY'] client = EventHubClient( - address=address, - username=shared_access_policy, - password=shared_access_key) + host=host, + event_hub_path=event_hub_path, + credential=EventHubSharedKeyCredential(shared_access_policy, shared_access_key) + ) # [END create_eventhub_client] return client -def create_eventhub_client_from_sas_token(live_eventhub_config): - # [START create_eventhub_client_sas_token] - import os - from azure.eventhub import EventHubClient - - address = os.environ['EVENT_HUB_ADDRESS'] - sas_token = os.environ['EVENT_HUB_SAS_TOKEN'] - - client = EventHubClient.from_sas_token( - address=address, - sas_token=sas_token) - # [END create_eventhub_client_sas_token] - - def create_eventhub_client_from_iothub_connection_string(live_eventhub_config): # [START create_eventhub_client_iot_connstr] import os @@ -67,188 +55,82 @@ def test_example_eventhub_sync_send_and_receive(live_eventhub_config): client = EventHubClient.from_connection_string(connection_str) # [END create_eventhub_client_connstr] - from azure.eventhub import EventData, Offset + from azure.eventhub import EventData, EventPosition # [START create_eventhub_client_sender] - client = EventHubClient.from_connection_string(connection_str) - # Add a sender to the client object. - sender = client.add_sender(partition="0") + client = EventHubClient.from_connection_string(connection_str) + # Create a sender. + sender = client.create_sender(partition_id="0") # [END create_eventhub_client_sender] # [START create_eventhub_client_receiver] - client = EventHubClient.from_connection_string(connection_str) - # Add a receiver to the client object. 
- receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - # [END create_eventhub_client_receiver] - - # [START create_eventhub_client_epoch_receiver] - client = EventHubClient.from_connection_string(connection_str) - # Add a receiver to the client object with an epoch value. - epoch_receiver = client.add_epoch_receiver(consumer_group="$default", partition="0", epoch=42) - # [END create_eventhub_client_epoch_receiver] - - # [START eventhub_client_run] client = EventHubClient.from_connection_string(connection_str) - # Add Senders/Receivers - try: - client.run() - # Start sending and receiving - except: - raise - finally: - client.stop() - # [END eventhub_client_run] + # Create a receiver. + receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) + # Create an exclusive receiver object. + exclusive_receiver = client.create_receiver(partition_id="0", exclusive_receiver_priority=1) + # [END create_eventhub_client_receiver] client = EventHubClient.from_connection_string(connection_str) - sender = client.add_sender(partition="0") - receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + sender = client.create_sender(partition_id="0") + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) try: - # Opens the connection and starts running all Sender/Receiver clients. 
- client.run() - # Start sending and receiving + receiver.receive(timeout=1) # [START create_event_data] event_data = EventData("String data") event_data = EventData(b"Bytes data") event_data = EventData([b"A", b"B", b"C"]) - def batched(): - for i in range(10): - yield "Batch data, Event number {}".format(i) - - event_data = EventData(batch=batched()) + list_data = ['Message {}'.format(i) for i in range(10)] + event_data = EventData(body=list_data) # [END create_event_data] # [START eventhub_client_sync_send] - event_data = EventData(b"A single event") - sender.send(event_data) + with sender: + event_data = EventData(b"A single event") + sender.send(event_data) # [END eventhub_client_sync_send] time.sleep(1) # [START eventhub_client_sync_receive] - logger = logging.getLogger("azure.eventhub") - received = receiver.receive(timeout=5, max_batch_size=1) - for event_data in received: - logger.info("Message received:{}".format(event_data.body_as_str())) + with receiver: + logger = logging.getLogger("azure.eventhub") + received = receiver.receive(timeout=5, max_batch_size=1) + for event_data in received: + logger.info("Message received:{}".format(event_data.body_as_str())) # [END eventhub_client_sync_receive] - assert len(received) == 1 - assert received[0].body_as_str() == "A single event" - assert list(received[-1].body)[0] == b"A single event" - except: - raise - - finally: - client.stop() - - # [START eventhub_client_stop] - client = EventHubClient.from_connection_string(connection_str) - # Add Senders/Receivers - try: - client.run() - # Start sending and receiving - except: - raise + assert len(received) == 1 + assert received[0].body_as_str() == "A single event" + assert list(received[-1].body)[0] == b"A single event" finally: - client.stop() - # [END eventhub_client_stop] + pass -def test_example_eventhub_transfer(connection_str): - import os +def test_example_eventhub_sender_ops(live_eventhub_config, connection_str): from azure.eventhub import EventHubClient, 
EventData - client = EventHubClient.from_connection_string(connection_str) - sender = client.add_sender() - - try: - client.run() - # [START eventhub_client_transfer] - logger = logging.getLogger("azure.eventhub") - def callback(outcome, condition): - logger.info("Message sent. Outcome: {}, Condition: {}".format( - outcome, condition)) - - event_data = EventData(b"A single event") - sender.transfer(event_data, callback=callback) - sender.wait() - # [END eventhub_client_transfer] - except: - raise - finally: - client.stop() - - -def test_example_eventhub_sync_sender_ops(live_eventhub_config, connection_str): - import os - # [START create_eventhub_client_sender_instance] - from azure.eventhub import EventHubClient - - client = EventHubClient.from_connection_string(connection_str) - sender = client.add_sender(partition="0") - # [END create_eventhub_client_sender_instance] - - # [START eventhub_client_sender_open] - client = EventHubClient.from_connection_string(connection_str) - sender = client.add_sender(partition="0") - try: - # Open the Sender using the supplied conneciton. - sender.open() - # Start sending - except: - raise - finally: - # Close down the send handler. - sender.close() - # [END eventhub_client_sender_open] - # [START eventhub_client_sender_close] client = EventHubClient.from_connection_string(connection_str) - sender = client.add_sender(partition="0") + sender = client.create_sender(partition_id="0") try: - # Open the Sender using the supplied conneciton. - sender.open() - # Start sending - except: - raise + sender.send(EventData(b"A single event")) finally: # Close down the send handler. 
sender.close() - # [END eventhub_client_sender_close] - - -def test_example_eventhub_sync_receiver_ops(live_eventhub_config, connection_str): - import os - # [START create_eventhub_client_receiver_instance] - from azure.eventhub import EventHubClient, Offset + # [END eventhub_client_sender_close] - client = EventHubClient.from_connection_string(connection_str) - receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - # [END create_eventhub_client_receiver_instance] - # [START eventhub_client_receiver_open] - client = EventHubClient.from_connection_string(connection_str) - receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - try: - # Open the Receiver using the supplied conneciton. - receiver.open() - # Start receiving - except: - raise - finally: - # Close down the receive handler. - receiver.close() - # [END eventhub_client_receiver_open] +def test_example_eventhub_receiver_ops(live_eventhub_config, connection_str): + from azure.eventhub import EventHubClient + from azure.eventhub import EventPosition # [START eventhub_client_receiver_close] client = EventHubClient.from_connection_string(connection_str) - receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) + receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) try: - # Open the Receiver using the supplied conneciton. - receiver.open() - # Start receiving - except: - raise + receiver.receive(timeout=1) finally: # Close down the receive handler. 
receiver.close() - # [END eventhub_client_receiver_close] \ No newline at end of file + # [END eventhub_client_receiver_close] diff --git a/sdk/eventhub/azure-eventhubs/examples/transfer.py b/sdk/eventhub/azure-eventhubs/examples/transfer.py deleted file mode 100644 index 5190add90f5f..000000000000 --- a/sdk/eventhub/azure-eventhubs/examples/transfer.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python - -""" -An example to show sending events to an Event Hub. -""" - -# pylint: disable=C0111 - -import sys -import logging -import datetime -import time -import os - -from azure.eventhub import EventHubClient, Sender, EventData - -import examples -logger = examples.get_logger(logging.INFO) - - -# Address can be in either of these formats: -# "amqps://:@.servicebus.windows.net/myeventhub" -# "amqps://.servicebus.windows.net/myeventhub" -ADDRESS = os.environ.get('EVENT_HUB_ADDRESS') - -# SAS policy and key are not required if they are encoded in the URL -USER = os.environ.get('EVENT_HUB_SAS_POLICY') -KEY = os.environ.get('EVENT_HUB_SAS_KEY') - - -def callback(outcome, condition): - logger.info("Message sent. 
Outcome: {}, Condition: {}".format( - outcome, condition)) - - -try: - if not ADDRESS: - raise ValueError("No EventHubs URL supplied.") - - client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) - sender = client.add_sender(partition="1") - client.run() - try: - start_time = time.time() - for i in range(100): - sender.transfer(EventData(str(i)), callback=callback) - logger.info("Queued 100 messages.") - sender.wait() - logger.info("Finished processing queue.") - except: - raise - finally: - end_time = time.time() - client.stop() - run_time = end_time - start_time - logger.info("Runtime: {} seconds".format(run_time)) - -except KeyboardInterrupt: - pass diff --git a/sdk/eventhub/azure-eventhubs/sdk_packaging.toml b/sdk/eventhub/azure-eventhubs/sdk_packaging.toml new file mode 100644 index 000000000000..e7687fdae93b --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/sdk_packaging.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 6f6cf3399021..170aeccceb87 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -41,14 +41,14 @@ 'examples', # Exclude packages that will be covered by PEP420 or nspkg 'azure', - '*.eventprocessorhost', - '*.eventprocessorhost.*' ] -if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 5): +if sys.version_info < (3, 5, 3): exclude_packages.extend([ '*.aio', - '*.aio.*' + '*.aio.*', + '*.eventprocessorhost', + '*.eventprocessorhost.*' ]) setup( @@ -66,7 +66,6 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - # 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', @@ -79,7 +78,7 @@ 'msrestazure>=0.4.32,<2.0.0', 'azure-common~=1.1', 
'azure-storage-blob~=1.3', - 'azure-core~=1.0', + 'azure-core>=0.0.1', ], extras_require={ ":python_version<'3.0'": ['azure-nspkg'], diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py new file mode 100644 index 000000000000..4799df1a8634 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py @@ -0,0 +1,46 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import pytest +import time +import asyncio + +from azure.eventhub import EventData, EventPosition +from azure.eventhub.aio import EventHubClient + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_client_secret_credential_async(aad_credential, live_eventhub): + try: + from azure.identity.aio import AsyncClientSecretCredential + except ImportError: + pytest.skip("No azure identity library") + + client_id, secret, tenant_id = aad_credential + credential = AsyncClientSecretCredential(client_id=client_id, secret=secret, tenant_id=tenant_id) + client = EventHubClient(host=live_eventhub['hostname'], + event_hub_path=live_eventhub['event_hub'], + credential=credential, + user_agent='customized information') + sender = client.create_sender(partition_id='0') + receiver = client.create_receiver(partition_id='0', event_position=EventPosition.new_events_only()) + + async with receiver: + + received = await receiver.receive(timeout=1) + assert len(received) == 0 + + async with sender: + event = EventData(body='A single message') + await sender.send(event) + + await asyncio.sleep(1) + + received = await receiver.receive(timeout=1) + + assert len(received) == 1 + assert list(received[0].body)[0] == 'A single 
message'.encode('utf-8') diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index fdb5c1ffea35..c7ebd17a9fbc 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -34,6 +34,7 @@ async def get_partitions(iot_connection_str): @pytest.mark.liveTest @pytest.mark.asyncio async def test_iothub_receive_multiple_async(iot_connection_str): + pytest.skip("This will get AuthenticationError. We're investigating...") partitions = await get_partitions(iot_connection_str) client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) receivers = [] diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py index 78611b3bf2ef..11fda8eddefa 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py @@ -45,6 +45,7 @@ def get_logger(filename, level=logging.INFO): return azure_logger + logger = get_logger("eph_test_async.log", logging.INFO) @@ -125,13 +126,15 @@ async def pump(pid, sender, duration): try: async with sender: + list = [] while time.time() < deadline: data = EventData(body=b"D" * 512) - sender.queue_message(data) + list.append(data) total += 1 if total % 100 == 0: - await sender.send_pending_messages() - #logger.info("{}: Send total {}".format(pid, total)) + await sender.send(data) + list = [] + logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) raise @@ -139,7 +142,8 @@ async def pump(pid, sender, duration): @pytest.mark.liveTest -def test_long_running_eph(live_eventhub): +@pytest.mark.asyncio +async def test_long_running_eph(live_eventhub): 
parser = argparse.ArgumentParser() parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) parser.add_argument("--storage-account", help="Storage account name", default=os.environ.get('AZURE_STORAGE_ACCOUNT')) @@ -169,9 +173,9 @@ def test_long_running_eph(live_eventhub): send_client = EventHubClient.from_connection_string(conn_str) pumps = [] for pid in ["0", "1"]: - sender = send_client.create_sender(partition_id=pid, send_timeout=0, keep_alive=False) + sender = send_client.create_sender(partition_id=pid, send_timeout=0) pumps.append(pump(pid, sender, 15)) - results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) + results = await asyncio.gather(*pumps, return_exceptions=True) assert not any(results) @@ -206,7 +210,7 @@ def test_long_running_eph(live_eventhub): tasks = asyncio.gather( host.open_async(), wait_and_close(host, args.duration), return_exceptions=True) - results = loop.run_until_complete(tasks) + results = await tasks assert not any(results) @@ -219,4 +223,5 @@ def test_long_running_eph(live_eventhub): config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] config['consumer_group'] = "$Default" config['partition'] = "0" - test_long_running_eph(config) + loop = asyncio.get_event_loop() + loop.run_until_complete(test_long_running_eph(config)) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py index 7b4a9021db1d..2a6a83048251 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py @@ -130,13 +130,15 @@ async def pump(pid, sender, duration): try: async with sender: + list = [] while time.time() < deadline: data = EventData(body=b"D" * 512) - sender.queue_message(data) + list.append(data) total += 1 if total % 100 == 0: - await 
sender.send_pending_messages() - #logger.info("{}: Send total {}".format(pid, total)) + await sender.send(list) + list = [] + logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) raise @@ -144,7 +146,8 @@ async def pump(pid, sender, duration): @pytest.mark.liveTest -def test_long_running_context_eph(live_eventhub): +@pytest.mark.asyncio +async def test_long_running_context_eph(live_eventhub): parser = argparse.ArgumentParser() parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) parser.add_argument("--storage-account", help="Storage account name", default=os.environ.get('AZURE_STORAGE_ACCOUNT')) @@ -174,9 +177,9 @@ def test_long_running_context_eph(live_eventhub): send_client = EventHubClient.from_connection_string(conn_str) pumps = [] for pid in ["0", "1"]: - sender = send_client.add_async_sender(partition_id=pid, send_timeout=0) + sender = send_client.create_sender(partition_id=pid, send_timeout=0) pumps.append(pump(pid, sender, 15)) - results = loop.run_until_complete(asyncio.gather(*pumps, return_exceptions=True)) + results = await asyncio.gather(*pumps, return_exceptions=True) assert not any(results) # Eventhub config and storage manager @@ -210,7 +213,7 @@ def test_long_running_context_eph(live_eventhub): tasks = asyncio.gather( host.open_async(), wait_and_close(host, args.duration), return_exceptions=True) - results = loop.run_until_complete(tasks) + results = await tasks assert not any(results) @@ -223,4 +226,5 @@ def test_long_running_context_eph(live_eventhub): config['namespace'] = os.environ['EVENT_HUB_NAMESPACE'] config['consumer_group'] = "$Default" config['partition'] = "0" - test_long_running_context_eph(config) + loop = asyncio.get_event_loop() + loop.run_until_complete(test_long_running_context_eph(config)) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py 
b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py index 1f50144674b8..63c7940a539e 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py @@ -18,7 +18,7 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventPosition +from azure.eventhub import EventPosition, EventHubSharedKeyCredential from azure.eventhub.aio import EventHubClient @@ -103,13 +103,14 @@ async def test_long_running_receive_async(connection_str): if args.conn_str: client = EventHubClient.from_connection_string( args.conn_str, - eventhub=args.eventhub, auth_timeout=240, network_tracing=False) + event_hub_path=args.eventhub, auth_timeout=240, network_tracing=False) elif args.address: - client = EventHubClient( - args.address, - auth_timeout=240, - username=args.sas_policy, - password=args.sas_key) + client = EventHubClient(host=args.address, + event_hub_path=args.eventhub, + credential=EventHubSharedKeyCredential(args.sas_policy, args.sas_key), + auth_timeout=240, + network_tracing=False) + else: try: import pytest @@ -117,22 +118,19 @@ async def test_long_running_receive_async(connection_str): except ImportError: raise ValueError("Must specify either '--conn-str' or '--address'") - try: - if not args.partitions: - partitions = await client.get_partition_ids() - else: - partitions = args.partitions.split(",") - pumps = [] - for pid in partitions: - receiver = client.create_receiver( - partition_id=pid, - event_position=EventPosition(args.offset), - prefetch=50, - loop=loop) - pumps.append(pump(pid, receiver, args, args.duration)) - await asyncio.gather(*pumps) - finally: - pass + if not args.partitions: + partitions = await client.get_partition_ids() + else: + partitions = args.partitions.split(",") + pumps = [] + for pid in partitions: + receiver = client.create_receiver( + partition_id=pid, + 
event_position=EventPosition(args.offset), + prefetch=50, + loop=loop) + pumps.append(pump(pid, receiver, args, args.duration)) + await asyncio.gather(*pumps) if __name__ == '__main__': diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py index dd87e5324558..98dcccb0ea79 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py @@ -104,13 +104,12 @@ async def test_long_running_partition_send_async(connection_str): if args.conn_str: client = EventHubClient.from_connection_string( args.conn_str, - eventhub=args.eventhub, network_tracing=True) + event_hub_path=args.eventhub, network_tracing=True) elif args.address: - client = EventHubClient( - args.address, - username=args.sas_policy, - password=args.sas_key, - auth_timeout=500) + client = EventHubClient(host=args.address, + event_hub_path=args.eventhub, + credential=EventHubSharedKeyCredential(args.sas_policy, args.sas_key), + network_tracing=False) else: try: import pytest @@ -134,9 +133,7 @@ async def test_long_running_partition_send_async(connection_str): results = await asyncio.gather(*pumps, return_exceptions=True) assert not results except Exception as e: - logger.error("Sender failed: {}".format(e)) - finally: - pass + logger.error("EventSender failed: {}".format(e)) if __name__ == '__main__': diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py index 4e904d19453f..75f168027644 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py @@ -15,8 +15,10 @@ EventPosition, EventHubError, ConnectError, + ConnectionLostError, AuthenticationError, EventDataError, + EventDataSendError, ) from 
azure.eventhub.aio import EventHubClient @@ -95,7 +97,7 @@ async def test_send_partition_key_with_partition_async(connection_str): @pytest.mark.liveTest @pytest.mark.asyncio async def test_non_existing_entity_sender_async(connection_str): - client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) sender = client.create_sender(partition_id="1") with pytest.raises(AuthenticationError): await sender._open() @@ -104,7 +106,7 @@ async def test_non_existing_entity_sender_async(connection_str): @pytest.mark.liveTest @pytest.mark.asyncio async def test_non_existing_entity_receiver_async(connection_str): - client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) receiver = client.create_receiver(partition_id="0") with pytest.raises(AuthenticationError): await receiver._open() @@ -147,7 +149,7 @@ async def test_send_too_large_message_async(connection_str): sender = client.create_sender() try: data = EventData(b"A" * 1100000) - with pytest.raises(EventDataError): + with pytest.raises(EventDataSendError): await sender.send(data) finally: await sender.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py index 20641033e5bb..9401c65515aa 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_properties_async.py @@ -5,7 +5,7 @@ #-------------------------------------------------------------------------- import pytest -from azure.eventhub import SharedKeyCredentials +from azure.eventhub import EventHubSharedKeyCredential from azure.eventhub.aio import EventHubClient @@ -13,7 +13,7 
@@ @pytest.mark.asyncio async def test_get_properties(live_eventhub): client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], - SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']) ) properties = await client.get_properties() assert properties['path'] == live_eventhub['event_hub'] and properties['partition_ids'] == ['0', '1'] @@ -23,7 +23,7 @@ async def test_get_properties(live_eventhub): @pytest.mark.asyncio async def test_get_partition_ids(live_eventhub): client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], - SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']) ) partition_ids = await client.get_partition_ids() assert partition_ids == ['0', '1'] @@ -33,7 +33,7 @@ async def test_get_partition_ids(live_eventhub): @pytest.mark.asyncio async def test_get_partition_properties(live_eventhub): client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], - SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']) ) properties = await client.get_partition_properties('0') assert properties['event_hub_path'] == live_eventhub['event_hub'] \ diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py new file mode 100644 index 000000000000..b087bb783bb1 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py @@ -0,0 +1,30 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import os +import asyncio +import pytest +import time + +from azure.eventhub import EventData, EventPosition, EventHubError, TransportType +from azure.eventhub.aio import EventHubClient + + +@pytest.mark.liveTest +@pytest.mark.asyncio +async def test_receive_iterator_async(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + async with receiver: + received = await receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Receiving only a single event")) + async for item in receiver: + received.append(item) + break + assert len(received) == 1 + assert list(received[-1].body)[0] == b"Receiving only a single event" \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index ebbace8c8a05..e7a4fcad61f0 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -15,6 +15,7 @@ EventHubError) from azure.eventhub.aio import EventHubClient +SLEEP = False @pytest.mark.liveTest @pytest.mark.asyncio @@ -25,15 +26,18 @@ async def test_send_with_long_interval_async(connstr_receivers): try: await sender.send(EventData(b"A single event")) for _ in range(1): - #await asyncio.sleep(300) - sender._handler._connection._conn.destroy() + if SLEEP: + await asyncio.sleep(300) + else: + sender._handler._connection._conn.destroy() await sender.send(EventData(b"A single event")) finally: await sender.close() received = [] for r in receivers: - r._handler._connection._conn.destroy() + if not SLEEP: # if 
sender sleeps, the receivers will be disconnected. destroy connection to simulate + r._handler._connection._conn.destroy() received.extend(r.receive(timeout=1)) assert len(received) == 2 assert list(received[0].body)[0] == b"A single event" @@ -58,12 +62,16 @@ async def test_send_with_forced_conn_close_async(connstr_receivers): sender = client.create_sender() try: await sender.send(EventData(b"A single event")) - sender._handler._message_sender.destroy() - await asyncio.sleep(300) + if SLEEP: + await asyncio.sleep(300) + else: + sender._handler._connection._conn.destroy() await sender.send(EventData(b"A single event")) await sender.send(EventData(b"A single event")) - sender._handler._message_sender.destroy() - await asyncio.sleep(300) + if SLEEP: + await asyncio.sleep(300) + else: + sender._handler._connection._conn.destroy() await sender.send(EventData(b"A single event")) await sender.send(EventData(b"A single event")) finally: @@ -71,6 +79,8 @@ async def test_send_with_forced_conn_close_async(connstr_receivers): received = [] for r in receivers: - received.extend(pump(r)) + if not SLEEP: + r._handler._connection._conn.destroy() + received.extend(pump(r)) assert len(received) == 5 assert list(received[0].body)[0] == b"A single event" diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py index 9883be044345..fc3a5559415d 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py @@ -30,17 +30,17 @@ async def test_send_with_partition_key_async(connstr_receivers): data = EventData(str(data_val)) # data.partition_key = partition_key data_val += 1 - await sender.send(data, batching_label=partition_key) + await sender.send(data, partition_key=partition_key) found_partition_keys = {} for index, partition in enumerate(receivers): received = partition.receive(timeout=5) for message in received: 
try: - existing = found_partition_keys[message._batching_label] + existing = found_partition_keys[message.partition_key] assert existing == index except KeyError: - found_partition_keys[message._batching_label] = index + found_partition_keys[message.partition_key] = index @pytest.mark.liveTest diff --git a/sdk/eventhub/azure-eventhubs/tests/test_auth.py b/sdk/eventhub/azure-eventhubs/tests/test_auth.py new file mode 100644 index 000000000000..aa728d2d54de --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/test_auth.py @@ -0,0 +1,40 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import pytest +import time + +from azure.eventhub import EventData, EventHubClient, EventPosition + + +@pytest.mark.liveTest +def test_client_secret_credential(aad_credential, live_eventhub): + try: + from azure.identity import ClientSecretCredential + except ImportError: + pytest.skip("No azure identity library") + client_id, secret, tenant_id = aad_credential + credential = ClientSecretCredential(client_id=client_id, secret=secret, tenant_id=tenant_id) + client = EventHubClient(host=live_eventhub['hostname'], + event_hub_path=live_eventhub['event_hub'], + credential=credential, + user_agent='customized information') + sender = client.create_sender(partition_id='0') + receiver = client.create_receiver(partition_id='0', event_position=EventPosition.new_events_only()) + + with receiver: + received = receiver.receive(timeout=1) + assert len(received) == 0 + + with sender: + event = EventData(body='A single message') + sender.send(event) + time.sleep(1) + + received = receiver.receive(timeout=1) + + assert len(received) == 1 + assert list(received[0].body)[0] == 'A single message'.encode('utf-8') diff --git 
a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py index df7a184a227e..75a39eb185bf 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py @@ -20,7 +20,5 @@ def test_iothub_send_single_event(iot_connection_str, device_id): sender = client.create_sender(operation='/messages/devicebound') try: sender.send(EventData(b"A single event", to_device=device_id)) - except: - raise finally: sender.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py index 7ae53a0b2496..b7477bc3e39f 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py @@ -20,6 +20,7 @@ from azure.eventhub import EventPosition from azure.eventhub import EventHubClient +from azure.eventhub import EventHubSharedKeyCredential def get_logger(filename, level=logging.INFO): azure_logger = logging.getLogger("azure.eventhub") @@ -76,7 +77,7 @@ def pump(receivers, duration): batch[-1].offset.value)) print("Total received {}".format(total)) except Exception as e: - print("Receiver failed: {}".format(e)) + print("EventReceiver failed: {}".format(e)) raise @@ -97,12 +98,13 @@ def test_long_running_receive(connection_str): if args.conn_str: client = EventHubClient.from_connection_string( args.conn_str, - eventhub=args.eventhub, network_tracing=False) + event_hub_path=args.eventhub, network_tracing=False) elif args.address: - client = EventHubClient( - args.address, - username=args.sas_policy, - password=args.sas_key) + client = EventHubClient(host=args.address, + event_hub_path=args.eventhub, + credential=EventHubSharedKeyCredential(args.sas_policy, args.sas_key), + auth_timeout=240, + network_tracing=False) else: try: import pytest diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py 
b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py index 90c6d0dc3cf9..ead603ed7372 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py @@ -17,7 +17,7 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventHubClient, Sender, EventData +from azure.eventhub import EventHubClient, EventSender, EventData def get_logger(filename, level=logging.INFO): @@ -101,12 +101,13 @@ def test_long_running_send(connection_str): if args.conn_str: client = EventHubClient.from_connection_string( args.conn_str, - eventhub=args.eventhub) + event_hub_path=args.eventhub) elif args.address: - client = EventHubClient( - args.address, - username=args.sas_policy, - password=args.sas_key) + client = EventHubClient(host=args.address, + event_hub_path=args.eventhub, + credential=EventHubSharedKeyCredential(args.sas_policy, args.sas_key), + auth_timeout=240, + network_tracing=False) else: try: import pytest diff --git a/sdk/eventhub/azure-eventhubs/tests/test_negative.py b/sdk/eventhub/azure-eventhubs/tests/test_negative.py index 206c5b415002..7111840995f9 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_negative.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_negative.py @@ -16,6 +16,7 @@ AuthenticationError, ConnectError, EventDataError, + EventDataSendError, EventHubClient) @@ -86,7 +87,7 @@ def test_send_partition_key_with_partition_sync(connection_str): @pytest.mark.liveTest def test_non_existing_entity_sender(connection_str): - client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) sender = client.create_sender(partition_id="1") with pytest.raises(AuthenticationError): sender._open() @@ -94,7 +95,7 @@ def test_non_existing_entity_sender(connection_str): @pytest.mark.liveTest def 
test_non_existing_entity_receiver(connection_str): - client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", network_tracing=False) + client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) receiver = client.create_receiver(partition_id="0") with pytest.raises(AuthenticationError): receiver._open() @@ -134,7 +135,7 @@ def test_send_too_large_message(connection_str): sender = client.create_sender() try: data = EventData(b"A" * 1100000) - with pytest.raises(EventDataError): + with pytest.raises(EventDataSendError): sender.send(data) finally: sender.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_properties.py b/sdk/eventhub/azure-eventhubs/tests/test_properties.py index b1889bdcf179..d16820a00083 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_properties.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_properties.py @@ -5,13 +5,13 @@ #-------------------------------------------------------------------------- import pytest -from azure.eventhub import EventHubClient, SharedKeyCredentials +from azure.eventhub import EventHubClient, EventHubSharedKeyCredential @pytest.mark.liveTest def test_get_properties(live_eventhub): client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], - SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']) ) properties = client.get_properties() assert properties['path'] == live_eventhub['event_hub'] and properties['partition_ids'] == ['0', '1'] @@ -20,7 +20,7 @@ def test_get_properties(live_eventhub): @pytest.mark.liveTest def test_get_partition_ids(live_eventhub): client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], - SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']) ) partition_ids = 
client.get_partition_ids() assert partition_ids == ['0', '1'] @@ -29,7 +29,7 @@ def test_get_partition_ids(live_eventhub): @pytest.mark.liveTest def test_get_partition_properties(live_eventhub): client = EventHubClient(live_eventhub['hostname'], live_eventhub['event_hub'], - SharedKeyCredentials(live_eventhub['key_name'], live_eventhub['access_key']) + EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']) ) properties = client.get_partition_properties('0') assert properties['event_hub_path'] == live_eventhub['event_hub'] \ diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py b/sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py new file mode 100644 index 000000000000..6f0dd3456df6 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py @@ -0,0 +1,31 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import pytest +import time +import datetime +from threading import Thread + +from azure.eventhub import EventData, EventHubClient, EventPosition, TransportType + + +@pytest.mark.liveTest +def test_receive_iterator(connstr_senders): + connection_str, senders = connstr_senders + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + with receiver: + received = receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Receiving only a single event")) + + for item in receiver: + received.append(item) + break + + assert len(received) == 1 + assert received[0].body_as_str() == "Receiving only a single event" + assert list(received[-1].body)[0] == b"Receiving only a single event" diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index b24cca267c82..d65f774108e0 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -14,6 +14,7 @@ EventHubError, EventHubClient) +SLEEP = False @pytest.mark.liveTest def test_send_with_long_interval_sync(connstr_receivers): @@ -23,12 +24,17 @@ def test_send_with_long_interval_sync(connstr_receivers): with sender: sender.send(EventData(b"A single event")) for _ in range(1): - time.sleep(300) + if SLEEP: + time.sleep(300) + else: + sender._handler._connection._conn.destroy() sender.send(EventData(b"A single event")) received = [] for r in receivers: - received.extend(r.receive(timeout=1)) + if not SLEEP: + r._handler._connection._conn.destroy() + received.extend(r.receive(timeout=3)) assert len(received) == 2 assert list(received[0].body)[0] == b"A single event" @@ -42,16 +48,23 @@ def test_send_with_forced_conn_close_sync(connstr_receivers): with sender: 
sender.send(EventData(b"A single event")) sender._handler._connection._conn.destroy() - time.sleep(300) + if SLEEP: + time.sleep(300) + else: + sender._handler._connection._conn.destroy() sender.send(EventData(b"A single event")) sender.send(EventData(b"A single event")) - sender._handler._connection._conn.destroy() - time.sleep(300) + if SLEEP: + time.sleep(300) + else: + sender._handler._connection._conn.destroy() sender.send(EventData(b"A single event")) sender.send(EventData(b"A single event")) received = [] for r in receivers: - received.extend(r.receive(timeout=1)) + if not SLEEP: + r._handler._connection._conn.destroy() + received.extend(r.receive(timeout=1)) assert len(received) == 5 assert list(received[0].body)[0] == b"A single event" diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index 3af0cbed2ef2..5a497f34dcca 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -26,17 +26,17 @@ def test_send_with_partition_key(connstr_receivers): data = EventData(str(data_val)) #data.partition_key = partition_key data_val += 1 - sender.send(data, batching_label=partition_key) + sender.send(data, partition_key=partition_key) found_partition_keys = {} for index, partition in enumerate(receivers): received = partition.receive(timeout=5) for message in received: try: - existing = found_partition_keys[message._batching_label] + existing = found_partition_keys[message._partition_key] assert existing == index except KeyError: - found_partition_keys[message._batching_label] = index + found_partition_keys[message._partition_key] = index @pytest.mark.liveTest From 87c11dda302a810b7515cbcb01b712879d6e7d0d Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 17 Jun 2019 01:23:23 -0700 Subject: [PATCH 05/54] Fix missing consumer group directory in EPH --- .../azure_storage_checkpoint_manager.py | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 
deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py index 59df582efc87..05440824f23b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py @@ -98,7 +98,7 @@ def initialize(self, host): request_session=self.request_session) self.consumer_group_directory = self.storage_blob_prefix + self.host.eh_config.consumer_group - # Checkpoint Managment Methods + # Checkpoint Management Methods async def create_checkpoint_store_if_not_exists_async(self): """ @@ -120,6 +120,7 @@ async def get_checkpoint_async(self, partition_id): :return: Given partition checkpoint info, or `None` if none has been previously stored. :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint """ + lease = await self.get_lease_async(partition_id) checkpoint = None if lease: @@ -170,7 +171,7 @@ async def delete_checkpoint_async(self, partition_id): """ return # Make this a no-op to avoid deleting leases by accident. - # Lease Managment Methods + # Lease Management Methods async def create_lease_store_if_not_exists_async(self): """ @@ -211,14 +212,16 @@ async def get_lease_async(self, partition_id): :return: lease info for the partition, or `None`. 
:rtype: ~azure.eventprocessorhost.lease.Lease """ + blob_name = "{}/{}".format(self.consumer_group_directory, partition_id) try: blob = await self.host.loop.run_in_executor( self.executor, functools.partial( self.storage_client.get_blob_to_text, - self.lease_container_name, partition_id)) + self.lease_container_name, blob_name)) lease = AzureBlobLease() lease.with_blob(blob) + async def state(): """ Allow lease to curry storage_client to get state @@ -230,7 +233,7 @@ async def state(): functools.partial( self.storage_client.get_blob_properties, self.lease_container_name, - partition_id)) + blob_name)) return res.properties.lease.state except Exception as err: # pylint: disable=broad-except _logger.error("Failed to get lease state %r %r", err, partition_id) @@ -265,6 +268,7 @@ async def create_lease_if_not_exists_async(self, partition_id): :rtype: ~azure.eventprocessorhost.lease.Lease """ return_lease = None + blob_name = "{}/{}".format(self.consumer_group_directory, partition_id) try: return_lease = AzureBlobLease() return_lease.partition_id = partition_id @@ -274,12 +278,13 @@ async def create_lease_if_not_exists_async(self, partition_id): self.lease_container_name, partition_id, json.dumps({k:v for k, v in serializable_lease.items() if k != 'event_processor_context'})) + await self.host.loop.run_in_executor( self.executor, functools.partial( self.storage_client.create_blob_from_text, self.lease_container_name, - partition_id, + blob_name, json_lease)) except Exception: # pylint: disable=broad-except try: @@ -297,12 +302,14 @@ async def delete_lease_async(self, lease): :param lease: The stored lease to be deleted. 
:type lease: ~azure.eventprocessorhost.lease.Lease """ + + blob_name = "{}/{}".format(self.consumer_group_directory, lease.partition_id) await self.host.loop.run_in_executor( self.executor, functools.partial( self.storage_client.delete_blob, self.lease_container_name, - lease.partition_id, + blob_name, lease_id=lease.token)) async def acquire_lease_async(self, lease): @@ -319,6 +326,7 @@ async def acquire_lease_async(self, lease): retval = True new_lease_id = str(uuid.uuid4()) partition_id = lease.partition_id + blob_name = "{}/{}".format(self.consumer_group_directory, lease.partition_id) try: if asyncio.iscoroutinefunction(lease.state): state = await lease.state() @@ -342,7 +350,7 @@ async def acquire_lease_async(self, lease): functools.partial( self.storage_client.change_blob_lease, self.lease_container_name, - partition_id, + blob_name, lease.token, new_lease_id)) lease.token = new_lease_id @@ -353,7 +361,7 @@ async def acquire_lease_async(self, lease): functools.partial( self.storage_client.acquire_blob_lease, self.lease_container_name, - partition_id, + blob_name, self.lease_duration, new_lease_id)) lease.owner = self.host.host_name @@ -377,13 +385,14 @@ async def renew_lease_async(self, lease): :return: `True` if the lease was renewed successfully, `False` if not. 
:rtype: bool """ + blob_name = "{}/{}".format(self.consumer_group_directory, lease.partition_id) try: await self.host.loop.run_in_executor( self.executor, functools.partial( self.storage_client.renew_blob_lease, self.lease_container_name, - lease.partition_id, + blob_name, lease_id=lease.token, timeout=self.lease_duration)) except Exception as err: # pylint: disable=broad-except @@ -406,6 +415,7 @@ async def release_lease_async(self, lease): :rtype: bool """ lease_id = None + blob_name = "{}/{}".format(self.consumer_group_directory, lease.partition_id) try: _logger.info("Releasing lease %r %r", self.host.guid, lease.partition_id) lease_id = lease.token @@ -419,7 +429,7 @@ async def release_lease_async(self, lease): functools.partial( self.storage_client.create_blob_from_text, self.lease_container_name, - lease.partition_id, + blob_name, json.dumps(released_copy.serializable()), lease_id=lease_id)) await self.host.loop.run_in_executor( @@ -427,7 +437,7 @@ async def release_lease_async(self, lease): functools.partial( self.storage_client.release_blob_lease, self.lease_container_name, - lease.partition_id, + blob_name, lease_id)) except Exception as err: # pylint: disable=broad-except _logger.error("Failed to release lease %r %r %r", @@ -455,6 +465,7 @@ async def update_lease_async(self, lease): _logger.debug("Updating lease %r %r", self.host.guid, lease.partition_id) + blob_name = "{}/{}".format(self.consumer_group_directory, lease.partition_id) # First, renew the lease to make sure the update will go through. 
if await self.renew_lease_async(lease): try: @@ -463,7 +474,7 @@ async def update_lease_async(self, lease): functools.partial( self.storage_client.create_blob_from_text, self.lease_container_name, - lease.partition_id, + blob_name, json.dumps(lease.serializable()), lease_id=lease.token)) From 89bb40ecd46e814c3e7395e325c2072fa8dd101f Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 17 Jun 2019 01:24:07 -0700 Subject: [PATCH 06/54] Fix livetest code problem --- .../tests/asynctests/test_longrunning_eph.py | 8 +- .../test_longrunning_eph_with_context.py | 9 +- .../asynctests/test_longrunning_send_async.py | 33 ++---- .../tests/asynctests/test_receive_async.py | 109 +++++++++--------- .../tests/asynctests/test_send_async.py | 6 +- .../tests/test_longrunning_send.py | 36 +++--- .../azure-eventhubs/tests/test_receive.py | 2 +- 7 files changed, 90 insertions(+), 113 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py index 11fda8eddefa..800bad9b0f74 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py @@ -126,14 +126,14 @@ async def pump(pid, sender, duration): try: async with sender: - list = [] + event_list = [] while time.time() < deadline: data = EventData(body=b"D" * 512) - list.append(data) + event_list.append(data) total += 1 if total % 100 == 0: - await sender.send(data) - list = [] + await sender.send(event_list) + event_list = [] logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py index 2a6a83048251..f7cdb5c1de3e 100644 --- 
a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py @@ -45,6 +45,7 @@ def get_logger(filename, level=logging.INFO): return azure_logger + logger = get_logger("eph_test_async.log", logging.INFO) @@ -130,14 +131,14 @@ async def pump(pid, sender, duration): try: async with sender: - list = [] + event_list = [] while time.time() < deadline: data = EventData(body=b"D" * 512) - list.append(data) + event_list.append(data) total += 1 if total % 100 == 0: - await sender.send(list) - list = [] + await sender.send(event_list) + event_list = [] logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py index 98dcccb0ea79..0cf4c8044f54 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py @@ -13,7 +13,7 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventData +from azure.eventhub import EventData, EventHubSharedKeyCredential from azure.eventhub.aio import EventHubClient @@ -39,12 +39,8 @@ def get_logger(filename, level=logging.INFO): return azure_logger -logger = get_logger("send_test_async.log", logging.INFO) - -def check_send_successful(outcome, condition): - if outcome.value != 0: - print("Send failed {}".format(condition)) +logger = get_logger("send_test_async.log", logging.INFO) async def get_partitions(args): @@ -56,26 +52,16 @@ async def pump(pid, sender, args, duration): deadline = time.time() + duration total = 0 - def data_generator(): - for i in range(args.batch): - yield b"D" * args.payload - - if args.batch > 1: - logger.info("{}: Sending batched 
messages".format(pid)) - else: - logger.info("{}: Sending single messages".format(pid)) - try: async with sender: + event_list = [] while time.time() < deadline: - if args.batch > 1: - data = EventData(body=data_generator()) - else: - data = EventData(body=b"D" * args.payload) - sender.queue_message(data, callback=check_send_successful) - total += args.batch + data = EventData(body=b"D" * args.payload) + event_list.append(data) + total += 1 if total % 100 == 0: - await sender.send_pending_messages() + await sender.send(event_list) + event_list = [] logger.info("{}: Send total {}".format(pid, total)) except Exception as err: logger.error("{}: Send failed {}".format(pid, err)) @@ -89,8 +75,7 @@ async def test_long_running_partition_send_async(connection_str): parser = argparse.ArgumentParser() parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) parser.add_argument("--payload", help="payload size", type=int, default=1024) - parser.add_argument("--batch", help="Number of events to send and wait", type=int, default=200) - parser.add_argument("--partitions", help="Comma seperated partition IDs") + parser.add_argument("--partitions", help="Comma separated partition IDs") parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str) parser.add_argument("--eventhub", help="Name of EventHub") parser.add_argument("--address", help="Address URI to the EventHub entity") diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 18db98649264..8f8457904af6 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -9,7 +9,7 @@ import pytest import time -from azure.eventhub import EventData, EventPosition, EventHubError, TransportType +from azure.eventhub import EventData, EventPosition, EventHubError, 
TransportType, ConnectionLostError, ConnectError from azure.eventhub.aio import EventHubClient @@ -101,7 +101,6 @@ async def test_receive_with_datetime_async(connstr_senders): @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_sequence_no_async(connstr_senders): - # TODO: sampe problem as the sync version connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) @@ -180,15 +179,17 @@ async def test_exclusive_receiver_async(connstr_senders): senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receivers = [] - for exclusive_receiver_priority in [10, 20]: - receivers.append(client.create_receiver(partition_id="0", exclusive_receiver_priority=exclusive_receiver_priority, prefetch=5)) - outputs = await asyncio.gather( - pump(receivers[0]), - pump(receivers[1]), - return_exceptions=True) - assert isinstance(outputs[0], EventHubError) # TODO; it's LinkDetach error - assert outputs[1] == 1 + receiver1 = client.create_receiver(partition_id="0", exclusive_receiver_priority=10, prefetch=5) + receiver2 = client.create_receiver(partition_id="0", exclusive_receiver_priority=20, prefetch=10) + try: + await pump(receiver1) + output2 = await pump(receiver2) + with pytest.raises(ConnectionLostError): + await receiver1.receive(timeout=1) + assert output2 == 1 + finally: + await receiver1.close() + await receiver2.close() @pytest.mark.liveTest @@ -206,10 +207,9 @@ async def test_multiple_receiver_async(connstr_senders): try: more_partitions = await client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] - outputs = await asyncio.gather( - pump(receivers[0]), - pump(receivers[1]), - return_exceptions=True) + outputs = [0, 0] + outputs[0] = await pump(receivers[0]) + outputs[1] = await 
pump(receivers[1]) assert isinstance(outputs[0], int) and outputs[0] == 1 assert isinstance(outputs[1], int) and outputs[1] == 1 finally: @@ -224,19 +224,17 @@ async def test_exclusive_receiver_after_non_exclusive_receiver_async(connstr_sen senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receivers = [] - receivers.append(client.create_receiver(partition_id="0", prefetch=10)) - receivers.append(client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10)) + receiver1 = client.create_receiver(partition_id="0", prefetch=10) + receiver2 = client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10) try: - outputs = await asyncio.gather( - pump(receivers[0]), - pump(receivers[1], sleep=5), - return_exceptions=True) - assert isinstance(outputs[0], EventHubError) - assert isinstance(outputs[1], int) and outputs[1] == 1 + await pump(receiver1) + output2 = await pump(receiver2) + with pytest.raises(ConnectionLostError): + await receiver1.receive(timeout=1) + assert output2 == 1 finally: - for r in receivers: - await r.close() + await receiver1.close() + await receiver2.close() @pytest.mark.liveTest @@ -246,19 +244,16 @@ async def test_non_exclusive_receiver_after_exclusive_receiver_async(connstr_sen senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receivers = [] - receivers.append(client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10)) - receivers.append(client.create_receiver(partition_id="0", prefetch=10)) + receiver1 = client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10) + receiver2 = client.create_receiver(partition_id="0", prefetch=10) try: - outputs = await asyncio.gather( - pump(receivers[0]), - pump(receivers[1]), - return_exceptions=True) - assert 
isinstance(outputs[1], EventHubError) - assert isinstance(outputs[0], int) and outputs[0] == 1 + output1 = await pump(receiver1) + with pytest.raises(ConnectError): + await pump(receiver2) + assert output1 == 1 finally: - for r in receivers: - await r.close() + await receiver1.close() + await receiver2.close() @pytest.mark.liveTest @@ -281,21 +276,22 @@ def batched(): client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) - async with receiver: - received = await receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(batched()) + received = await receiver.receive(timeout=5) + assert len(received) == 0 + + senders[0].send(batched()) - await asyncio.sleep(1) + await asyncio.sleep(1) - received = await receiver.receive(max_batch_size=15, timeout=5) - assert len(received) == 15 + received = await receiver.receive(max_batch_size=15, timeout=5) + assert len(received) == 15 + await receiver.close() - for index, message in enumerate(received): - assert list(message.body)[0] == "Event Data {}".format(index).encode('utf-8') - assert (app_prop_key.encode('utf-8') in message.application_properties) \ - and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) + for index, message in enumerate(received): + assert list(message.body)[0] == "Event Data {}".format(index).encode('utf-8') + assert (app_prop_key.encode('utf-8') in message.application_properties) \ + and (dict(message.application_properties)[app_prop_key.encode('utf-8')] == app_prop_value.encode('utf-8')) @pytest.mark.liveTest @@ -309,14 +305,15 @@ async def test_receive_over_websocket_async(connstr_senders): for i in range(20): event_list.append(EventData("Event Number {}".format(i))) - async with receiver: - received = await receiver.receive(timeout=5) - assert len(received) == 0 + received = await 
receiver.receive(timeout=5) + assert len(received) == 0 - with senders[0]: - senders[0].send(event_list) + with senders[0]: + senders[0].send(event_list) - time.sleep(1) + time.sleep(1) + + received = await receiver.receive(max_batch_size=50, timeout=5) + assert len(received) == 20 - received = await receiver.receive(max_batch_size=50, timeout=5) - assert len(received) == 20 + await receiver.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py index fc3a5559415d..54adec39ef34 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py @@ -176,8 +176,9 @@ async def test_send_multiple_clients_async(connstr_receivers): client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender_0 = client.create_sender(partition_id="0") sender_1 = client.create_sender(partition_id="1") - async with sender_0 and sender_1: + async with sender_0: await sender_0.send(EventData(b"Message 0")) + async with sender_1: await sender_1.send(EventData(b"Message 1")) partition_0 = receivers[0].receive(timeout=2) @@ -242,3 +243,6 @@ async def test_send_over_websocket_async(connstr_receivers): received.extend(r.receive(timeout=3)) assert len(received) == 20 + + for r in receivers: + r.close() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py index ead603ed7372..e0f535faf8bb 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py @@ -17,7 +17,7 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventHubClient, EventSender, EventData +from azure.eventhub import EventHubClient, EventSender, EventData, EventHubSharedKeyCredential def get_logger(filename, level=logging.INFO): @@ 
-55,30 +55,20 @@ def main(client, args): deadline = time.time() + args.duration total = 0 - def data_generator(): - for i in range(args.batch): - yield b"D" * args.payload - - if args.batch > 1: - print("Sending batched messages") - else: - print("Sending single messages") - try: - while time.time() < deadline: - if args.batch > 1: - data = EventData(batch=data_generator()) - else: - data = EventData(batch=b"D" * args.payload) - sender.queue_message(data, callback=check_send_successful) - total += args.batch - if total % 10000 == 0: - sender.send_pending_messages() - print("Send total {}".format(total)) + with sender: + event_list = [] + while time.time() < deadline: + data = EventData(body=b"D" * args.payload) + event_list.append(data) + total += 1 + if total % 100 == 0: + sender.send(event_list) + event_list = [] + print("Send total {}".format(total)) except Exception as err: print("Send failed {}".format(err)) - finally: - sender.close() + raise print("Sent total {}".format(total)) @@ -90,7 +80,6 @@ def test_long_running_send(connection_str): parser = argparse.ArgumentParser() parser.add_argument("--duration", help="Duration in seconds of the test", type=int, default=30) parser.add_argument("--payload", help="payload size", type=int, default=512) - parser.add_argument("--batch", help="Number of events to send and wait", type=int, default=1) parser.add_argument("--conn-str", help="EventHub connection string", default=connection_str) parser.add_argument("--eventhub", help="Name of EventHub") parser.add_argument("--address", help="Address URI to the EventHub entity") @@ -120,5 +109,6 @@ def test_long_running_send(connection_str): except KeyboardInterrupt: pass + if __name__ == '__main__': test_long_running_send(os.environ.get('EVENT_HUB_CONNECTION_STR')) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index 38944e553bdf..c32d748a6ce0 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py 
+++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py @@ -138,7 +138,7 @@ def test_receive_with_custom_datetime_sync(connstr_senders): client = EventHubClient.from_connection_string(connection_str, network_tracing=False) for i in range(5): senders[0].send(EventData(b"Message before timestamp")) - time.sleep(60) + time.sleep(65) now = datetime.datetime.utcnow() offset = datetime.datetime(now.year, now.month, now.day, now.hour, now.minute) From 06a6ebf6eaf4961af7cf38acc7455f21a6a0e4e2 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 17 Jun 2019 01:24:23 -0700 Subject: [PATCH 07/54] history and readme --- sdk/eventhub/azure-eventhubs/HISTORY.rst | 27 +++ sdk/eventhub/azure-eventhubs/README.md | 241 +++++++++++++++++++++++ sdk/eventhub/azure-eventhubs/README.rst | 229 --------------------- sdk/eventhub/azure-eventhubs/setup.py | 2 +- 4 files changed, 269 insertions(+), 230 deletions(-) create mode 100644 sdk/eventhub/azure-eventhubs/README.md delete mode 100644 sdk/eventhub/azure-eventhubs/README.rst diff --git a/sdk/eventhub/azure-eventhubs/HISTORY.rst b/sdk/eventhub/azure-eventhubs/HISTORY.rst index e7fd3a2571f5..1914d356cd43 100644 --- a/sdk/eventhub/azure-eventhubs/HISTORY.rst +++ b/sdk/eventhub/azure-eventhubs/HISTORY.rst @@ -3,6 +3,33 @@ Release History =============== +2.0.0-preview.1 (2019-06-17) +---------------------------- + +- Added more configuration parameters when creating EventHubClient. +- New error hierarchy + + - `azure.error.EventHubError` + - `azure.error.ConnectionLostError` + - `azure.error.ConnectError` + - `azure.error.AuthenticationError` + - `azure.error.EventDataError` + - `azure.error.EventDataSendError` + +- Renamed Sender/Receiver to EventSender/EventReceiver New APIs for creating EventSender/EventReceiver. +- Rename class azure.eventhub.Offset to azure.eventhub.EventPosition +- Reorganzied connection management, EventHubClient is no longer responsible for opening/closing EventSender/EventReceiver. 
+ + - Each EventSender/EventReceiver is responsible for its own connection management. + - Added support for context manager on EventSender and EventReceiver. + +- Reorganized async APIs into "azure.eventhub.aio" namespace and rename to drop the "_async" suffix. +- EventReceiver is now iteratable. +- Added support for authentication using azure-core credential. +- Added support for transport using AMQP over Websocket. +- Updated uAMQP dependency to 1.2.0 + + 1.3.1 (2019-02-28) ------------------ diff --git a/sdk/eventhub/azure-eventhubs/README.md b/sdk/eventhub/azure-eventhubs/README.md new file mode 100644 index 000000000000..87e1bf6e25fc --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/README.md @@ -0,0 +1,241 @@ +# Azure Event Hubs client library for Python + +Azure Event Hubs is a big data streaming platform and event ingestion service. It can receive and process millions of events per second. + +Use the Event Hubs client library for Python to: + +- Publish events to the Event Hubs service through a sender. +- Read events from the Event Hubs service through a receiver. + +On Python 3.5.3 and above, it also includes: + +- An async sender and receiver that supports async/await methods. +- An Event Processor Host module that manages the distribution of partition readers. + +[Source code](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs) | [Package (PyPi)](https://pypi.org/project/azure-eventhub/) | [API reference documentation](https://docs.microsoft.com/python/api/azure-eventhub) | [Product documentation](https://docs.microsoft.com/en-ca/azure/event-hubs/) + +# Getting started + +## Install the package + +Install the Azure Event Hubs client library for Python with pip: + +``` +$ pip install azure-eventhub +``` + +### Prerequisites + +- An Azure subscription. +- Python 2.7, 3.5 or later. +- An existing Event Hubs namespace and event hub. 
You can create these entities by following the instructions in [this article](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create) + +## Authenticate the client + +Interaction with Event Hubs starts with an instance of the EventHubClient class. You need the host name, SAS/AAD credential and event hub name to instantiate the client object. + +### Get credentials + +You can find credential information in [Azure Portal](https://portal.azure.com/). + +### Create client + +There are several ways to instantiate the EventHubClient object and the following code snippets demonstrate one way: + +```python +import os +from azure.eventhub import EventHubClient + +connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) +client = EventHubClient.from_connection_string(connection_str) +``` + +# Key concepts + +- **Namespace:** An Event Hubs namespace provides a unique scoping container, referenced by its fully qualified domain name, in which you create one or more event hubs or Kafka topics. + +- **Event publishers**: Any entity that sends data to an event hub is an event producer, or event publisher. Event publishers can publish events using HTTPS or AMQP 1.0 or Kafka 1.0 and later. Event publishers use a Shared Access Signature (SAS) token to identify themselves to an event hub, and can have a unique identity, or use a common SAS token. + +- **Event consumers**: Any entity that reads event data from an event hub is an event consumer. All Event Hubs consumers connect via the AMQP 1.0 session and events are delivered through the session as they become available. The client does not need to poll for data availability. + +- **SAS tokens**: Event Hubs uses Shared Access Signatures, which are available at the namespace and event hub level. 
A SAS token is generated from a SAS key and is an SHA hash of a URL, encoded in a specific format. Using the name of the key (policy) and the token, Event Hubs can regenerate the hash and thus authenticate the sender. + +For more information about these concepts, see [Features and terminology in Azure Event Hubs](https://docs.microsoft.com/en-ca/azure/event-hubs/event-hubs-features). + +# Examples + +The following sections provide several code snippets covering some of the most common Event Hubs tasks, including: + +- [Send event data](#send-event-data) +- [Receive event data](#receive-event-data) +- [Async send event data](#async-send-event-data) +- [Async receive event data](#async-receive-event-data) + +## Send event data + +Sends an event data and blocks until acknowledgement is received or operation times out. + +```python +import os +from azure.eventhub import EventHubClient, EventData + +connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) +client = EventHubClient.from_connection_string(connection_str) +sender = client.create_sender(partition_id="0") + +try: + event_list = [] + for i in range(10): + event_list.append(EventData(b"A single event")) + + with sender: + sender.send(event_list) +except: + raise +finally: + pass +``` + +## Receive event data + +Receive events from the EventHub. 
+ +```python +import os +import logging +from azure.eventhub import EventHubClient, EventData, EventPosition + +connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) +client = EventHubClient.from_connection_string(connection_str) +receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition.new_events_only()) + +try: + logger = logging.getLogger("azure.eventhub") + with receiver: + received = receiver.receive(max_batch_size=100, timeout=5) + for event_data in received: + logger.info("Message received:{}".format(event_data.body_as_str())) +except: + raise +finally: + pass +``` + +## Async send event data + +Sends an event data asynchronously. + +```python +import os +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData + +connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) +client = EventHubClient.from_connection_string(connection_str) +sender = client.create_sender(partition_id="0") + +try: + event_list = [] + for i in range(10): + event_list.append(EventData(b"A single event")) + + async with sender: + await sender.send(event_list) +except: + raise +finally: + pass +``` + +## Async receive event data + +Receive events asynchronously from the EventHub. 
+ +```python +import os +import logging +from azure.eventhub.aio import EventHubClient +from azure.eventhub import EventData, EventPosition + +connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( + os.environ['EVENT_HUB_HOSTNAME'], + os.environ['EVENT_HUB_SAS_POLICY'], + os.environ['EVENT_HUB_SAS_KEY'], + os.environ['EVENT_HUB_NAME']) +client = EventHubClient.from_connection_string(connection_str) +receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition.new_events_only()) + +try: + logger = logging.getLogger("azure.eventhub") + async with receiver: + received = await receiver.receive(max_batch_size=100, timeout=5) + for event_data in received: + logger.info("Message received:{}".format(event_data.body_as_str())) +except: + raise +finally: + pass +``` + +# Troubleshooting + +## General + +The Event Hubs APIs generate the following exceptions. + +- **AuthenticationError:** Failed to authenticate because of wrong address, SAS policy/key pair, SAS token or azure identity. +- **ConnectError:** Failed to connect to the EventHubs. The AuthenticationError is a type of ConnectError. +- **ConnectionLostError:** Lose connection after a connection has been built. +- **EventDataError:** The EventData to be sent fails data validation. +For instance, this error is raised if you try to send an EventData that is already sent. +- **EventDataSendError:** The Eventhubs service responds with an error when an EventData is sent. +- **EventHubError:** All other Eventhubs related errors. It is also the root error class of all the above mentioned errors. 
+ +# Next steps + +## Examples + +- ./examples/send.py - use sender to publish events +- ./examples/recv.py - use receiver to read events +- ./examples/send_async.py - async/await support of a sender +- ./examples/recv_async.py - async/await support of a receiver +- ./examples/eph.py - event processor host + +## Documentation + +Reference documentation is available at https://docs.microsoft.com/python/api/azure-eventhub. + +## Logging + +- Enable 'azure.eventhub' logger to collect traces from the library. +- Enable 'uamqp' logger to collect traces from the underlying uAMQP library. +- Enable AMQP frame level trace by setting `network_tracing=True` when creating the client. + +## Provide Feedback + +If you encounter any bugs or have suggestions, please file an issue in the [Issues](https://github.com/Azure/azure-sdk-for-python/issues) section of the project. + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
\ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/README.rst b/sdk/eventhub/azure-eventhubs/README.rst deleted file mode 100644 index d6fcf10b1373..000000000000 --- a/sdk/eventhub/azure-eventhubs/README.rst +++ /dev/null @@ -1,229 +0,0 @@ -Azure Event Hubs client library for Python -========================================== - -Azure Event Hubs is a big data streaming platform and event ingestion service. It can receive and process millions of events per second. - -Use the Event Hubs client library for Python to: - -- Publish events to the Event Hubs service through a sender. -- Read events from the Event Hubs service through a receiver. - -On Python 3.5 and above, it also includes: - -- An async sender and receiver that supports async/await methods. -- An Event Processor Host module that manages the distribution of partition readers. - -`Source code `__ | `Package (PyPi) `__ | `API reference documentation `__ | `Product documentation `__ - -Getting started -=============== - -Install the package -------------------- - -Install the Azure Event Hubs client library for Python with pip: - -.. code:: shell - - $ pip install azure-eventhub - -Prerequisites -+++++++++++++ - -- An Azure subscription. -- Python 3.4 or later. -- An existing Event Hubs namespace and event hub. You can create these entities by following the instructions in `this article `__. - -Authenticate the client ------------------------ - -Interaction with Event Hubs starts with an instance of the EventHubClient class. You need the host name, sas policy name, sas key and event hub name to instantiate the client object. - -Get credentials -+++++++++++++++ - -You can find credential information in `Azure Portal `__. - -Create client -+++++++++++++ - -There are several ways to instantiate the EventHubClient object and the following code snippets demonstrate one way: - -.. 
code:: python - - import os - from azure.eventhub import EventHubClient - - connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format( - os.environ['EVENT_HUB_HOSTNAME'], - os.environ['EVENT_HUB_SAS_POLICY'], - os.environ['EVENT_HUB_SAS_KEY'], - os.environ['EVENT_HUB_NAME']) - client = EventHubClient.from_connection_string(connection_str) - -Key concepts -============ - -- **Namespace:** An Event Hubs namespace provides a unique scoping container, referenced by its fully qualified domain name, in which you create one or more event hubs or Kafka topics. - -- **Event publishers**: Any entity that sends data to an event hub is an event producer, or event publisher. Event publishers can publish events using HTTPS or AMQP 1.0 or Kafka 1.0 and later. Event publishers use a Shared Access Signature (SAS) token to identify themselves to an event hub, and can have a unique identity, or use a common SAS token. - -- **Event consumers**: Any entity that reads event data from an event hub is an event consumer. All Event Hubs consumers connect via the AMQP 1.0 session and events are delivered through the session as they become available. The client does not need to poll for data availability. - -- **SAS tokens**: Event Hubs uses Shared Access Signatures, which are available at the namespace and event hub level. A SAS token is generated from a SAS key and is an SHA hash of a URL, encoded in a specific format. Using the name of the key (policy) and the token, Event Hubs can regenerate the hash and thus authenticate the sender. - -For more information about these concepts, see `Features and terminology in Azure Event Hubs `__. - -Examples -======== - -The following sections provide several code snippets covering some of the most common Event Hubs tasks, including: - -- `Send event data`_ -- `Receive event data`_ -- `Async send event data`_ -- `Async receive event data`_ - -.. 
_`Send event data`: - -Send event data ---------------- - -Sends an event data and blocks until acknowledgement is received or operation times out. - -.. code:: python - - client = EventHubClient.from_connection_string(connection_str) - sender = client.add_sender(partition="0") - try: - client.run() - event_data = EventData(b"A single event") - sender.send(event_data) - except: - raise - finally: - client.stop() - -.. _`Receive event data`: - -Receive event data ------------------- - -Receive events from the EventHub. - -.. code:: python - - client = EventHubClient.from_connection_string(connection_str) - receiver = client.add_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - try: - client.run() - logger = logging.getLogger("azure.eventhub") - received = receiver.receive(timeout=5, max_batch_size=100) - for event_data in received: - logger.info("Message received:{}".format(event_data.body_as_str())) - except: - raise - finally: - client.stop() - -.. _`Async send event data`: - -Async send event data ---------------------- - -Sends an event data and asynchronously waits until acknowledgement is received or operation times out. - -.. code:: python - - client = EventHubClientAsync.from_connection_string(connection_str) - sender = client.add_async_sender(partition="0") - try: - await client.run_async() - event_data = EventData(b"A single event") - await sender.send(event_data) - except: - raise - finally: - await client.stop_async() - -.. _`Async receive event data`: - -Async receive event data ------------------------- - -Receive events asynchronously from the EventHub. - -.. 
code:: python - - client = EventHubClientAsync.from_connection_string(connection_str) - receiver = client.add_async_receiver(consumer_group="$default", partition="0", offset=Offset('@latest')) - try: - await client.run_async() - logger = logging.getLogger("azure.eventhub") - received = await receiver.receive(timeout=5) - for event_data in received: - logger.info("Message received:{}".format(event_data.body_as_str())) - except: - raise - finally: - await client.stop_async() - -Troubleshooting -=============== - -General -------- - -The Event Hubs APIs generate exceptions that can fall into the following categories, along with the associated action you can take to try to fix them. - -- **User coding error:** System.ArgumentException, System.InvalidOperationException, System.OperationCanceledException, System.Runtime.Serialization.SerializationException. General action: try to fix the code before proceeding. -- **Setup/configuration error:** Microsoft.ServiceBus.Messaging.MessagingEntityNotFoundException, Microsoft.Azure.EventHubs.MessagingEntityNotFoundException, System.UnauthorizedAccessException. General action: review your configuration and change if necessary. -- **Transient exceptions:** Microsoft.ServiceBus.Messaging.MessagingException, Microsoft.ServiceBus.Messaging.ServerBusyException, Microsoft.Azure.EventHubs.ServerBusyException, Microsoft.ServiceBus.Messaging.MessagingCommunicationException. General action: retry the operation or notify users. -- **Other exceptions:** System.Transactions.TransactionException, System.TimeoutException, Microsoft.ServiceBus.Messaging.MessageLockLostException, Microsoft.ServiceBus.Messaging.SessionLockLostException. General action: specific to the exception type; refer to the table in `Event Hubs messaging exceptions `__. - -For more detailed infromation about excpetions and how to deal with them , see `Event Hubs messaging exceptions `__. 
- -Next steps -========== - -Examples --------- - -- ./examples/send.py - use sender to publish events -- ./examples/recv.py - use receiver to read events -- ./examples/send_async.py - async/await support of a sender -- ./examples/recv_async.py - async/await support of a receiver -- ./examples/eph.py - event processor host - -Documentation -------------- -Reference documentation is available at `docs.microsoft.com/python/api/azure-eventhub `__. - -Logging -------- - -- enable 'azure.eventhub' logger to collect traces from the library -- enable 'uamqp' logger to collect traces from the underlying uAMQP library -- enable AMQP frame level trace by setting `debug=True` when creating the Client - -Provide Feedback ----------------- - -If you encounter any bugs or have suggestions, please file an issue in the -`Issues `__ -section of the project. - -Contributing -============ - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit `https://cla.microsoft.com `__. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -This project has adopted the `Microsoft Open Source Code of Conduct `__. -For more information see the `Code of Conduct FAQ `__ or -contact `opencode@microsoft.com `__ with any additional questions or comments. 
\ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 170aeccceb87..4eae6a50e048 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -30,7 +30,7 @@ if not version: raise RuntimeError('Cannot find version information') -with open('README.rst') as f: +with open('README.md') as f: readme = f.read() with open('HISTORY.rst') as f: history = f.read() From edeaffd594a020388edef60f4051e7736888327a Mon Sep 17 00:00:00 2001 From: Yunhao Ling <47871814+yunhaoling@users.noreply.github.com> Date: Mon, 17 Jun 2019 15:39:10 -0700 Subject: [PATCH 08/54] Update history and setup (#5902) --- .../{HISTORY.rst => HISTORY.md} | 81 +++++++------------ sdk/eventhub/azure-eventhubs/setup.py | 3 +- 2 files changed, 32 insertions(+), 52 deletions(-) rename sdk/eventhub/azure-eventhubs/{HISTORY.rst => HISTORY.md} (77%) diff --git a/sdk/eventhub/azure-eventhubs/HISTORY.rst b/sdk/eventhub/azure-eventhubs/HISTORY.md similarity index 77% rename from sdk/eventhub/azure-eventhubs/HISTORY.rst rename to sdk/eventhub/azure-eventhubs/HISTORY.md index 1914d356cd43..6103bbefdddb 100644 --- a/sdk/eventhub/azure-eventhubs/HISTORY.rst +++ b/sdk/eventhub/azure-eventhubs/HISTORY.md @@ -1,37 +1,29 @@ -.. :changelog: +# Release History -Release History -=============== - -2.0.0-preview.1 (2019-06-17) ----------------------------- +## 2.0.0-preview.1 (2019-06-17) - Added more configuration parameters when creating EventHubClient. - New error hierarchy - - - `azure.error.EventHubError` - - `azure.error.ConnectionLostError` - - `azure.error.ConnectError` - - `azure.error.AuthenticationError` - - `azure.error.EventDataError` - - `azure.error.EventDataSendError` - -- Renamed Sender/Receiver to EventSender/EventReceiver New APIs for creating EventSender/EventReceiver. 
+ - `azure.error.EventHubError` + - `azure.error.ConnectionLostError` + - `azure.error.ConnectError` + - `azure.error.AuthenticationError` + - `azure.error.EventDataError` + - `azure.error.EventDataSendError` +- Renamed Sender/Receiver to EventSender/EventReceiver + - New APIs for creating EventSender/EventReceiver. + - EventReceiver is now iterable. - Rename class azure.eventhub.Offset to azure.eventhub.EventPosition -- Reorganzied connection management, EventHubClient is no longer responsible for opening/closing EventSender/EventReceiver. - - - Each EventSender/EventReceiver is responsible for its own connection management. - - Added support for context manager on EventSender and EventReceiver. - +- Reorganized connection management, EventHubClient is no longer responsible for opening/closing EventSender/EventReceiver. + - Each EventSender/EventReceiver is responsible for its own connection management. + - Added support for context manager on EventSender and EventReceiver. - Reorganized async APIs into "azure.eventhub.aio" namespace and rename to drop the "_async" suffix. -- EventReceiver is now iteratable. - Added support for authentication using azure-core credential. -- Added support for transport using AMQP over Websocket. +- Added support for transport using AMQP over WebSocket. - Updated uAMQP dependency to 1.2.0 -1.3.1 (2019-02-28) ------------------- +## 1.3.1 (2019-02-28) **BugFixes** @@ -39,38 +31,33 @@ Release History - Fixed stackoverflow error in continuous connection reconnect attempts. -1.3.0 (2019-01-29) ------------------- +## 1.3.0 (2019-01-29) -**Bugfixes** +**BugFixes** - Added support for auto reconnect on token expiration and other auth errors (issue #89). **Features** - Added ability to create ServiceBusClient from an existing SAS auth token, including - provding a function to auto-renew that token on expiry. + providing a function to auto-renew that token on expiry. 
- Added support for storing a custom EPH context value in checkpoint (PR #84, thanks @konstantinmiller) -1.2.0 (2018-11-29) ------------------- +## 1.2.0 (2018-11-29) - Support for Python 2.7 in azure.eventhub module (azure.eventprocessorhost will not support Python 2.7). - Parse EventData.enqueued_time as a UTC timestamp (issue #72, thanks @vjrantal) -1.1.1 (2018-10-03) ------------------- +## 1.1.1 (2018-10-03) - Fixed bug in Azure namespace package. -1.1.0 (2018-09-21) ------------------- +## 1.1.0 (2018-09-21) - Changes to `AzureStorageCheckpointLeaseManager` parameters to support other connection options (issue #61): - - The `storage_account_name`, `storage_account_key` and `lease_container_name` arguments are now optional keyword arguments. - Added a `sas_token` argument that must be specified with `storage_account_name` in place of `storage_account_key`. - Added an `endpoint_suffix` argument to support storage endpoints in National Clouds. @@ -81,8 +68,7 @@ Release History - Added convenience methods `body_as_str` and `body_as_json` to EventData object for easier processing of message data. -1.0.0 (2018-08-22) ------------------- +## 1.0.0 (2018-08-22) - API stable. - Renamed internal `_async` module to `async_ops` for docs generation. @@ -93,8 +79,7 @@ Release History - Reformatted logging for performance. -0.2.0 (2018-08-06) ------------------- +## 0.2.0 (2018-08-06) - Stability improvements for EPH. - Updated uAMQP version. @@ -109,8 +94,7 @@ Release History - `EPHOptions.auto_reconnect_on_error` -0.2.0rc2 (2018-07-29) ---------------------- +## 0.2.0rc2 (2018-07-29) - **Breaking change** `EventData.offset` will now return an object of type `~uamqp.common.Offset` rather than str. The original string value can be retrieved from `~uamqp.common.Offset.value`. @@ -122,8 +106,7 @@ Release History - Added keep-alive thread for maintaining an unused connection. 
-0.2.0rc1 (2018-07-06) ---------------------- +## 0.2.0rc1 (2018-07-06) - **Breaking change** Restructured library to support Python 3.7. Submodule `async` has been renamed and all classes from this module can now be imported from azure.eventhub directly. @@ -137,8 +120,7 @@ Release History - Dropped Python 2.7 wheel support. -0.2.0b2 (2018-05-29) --------------------- +## 0.2.0b2 (2018-05-29) - Added `namespace_suffix` to EventHubConfig() to support national clouds. - Added `device_id` attribute to EventData to support IoT Hub use cases. @@ -146,20 +128,17 @@ Release History - Updated uAMQP dependency to vRC1. -0.2.0b1 (2018-04-20) --------------------- +## 0.2.0b1 (2018-04-20) - Updated uAMQP to latest version. - Further testing and minor bug fixes. -0.2.0a2 (2018-04-02) --------------------- +## 0.2.0a2 (2018-04-02) - Updated uAQMP dependency. -0.2.0a1 (unreleased) --------------------- +## 0.2.0a1 (unreleased) - Swapped out Proton dependency for uAMQP. \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 4eae6a50e048..40c11892fd64 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -32,7 +32,7 @@ with open('README.md') as f: readme = f.read() -with open('HISTORY.rst') as f: +with open('HISTORY.md') as f: history = f.read() exclude_packages = [ @@ -56,6 +56,7 @@ version=version, description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME), long_description=readme + '\n\n' + history, + long_description_content_type='text/markdown', license='MIT License', author='Microsoft Corporation', author_email='azpysdkhelp@microsoft.com', From 106976f7ec17f5806e4422a0a06295e32193c785 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 17 Jun 2019 17:26:55 -0700 Subject: [PATCH 09/54] EventPosition.first_available_event -> earliest new_events_only -> latest --- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 80469a229e2d..5beba6e4593f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -319,7 +319,7 @@ def _selector(self): return ("amqp.annotation.x-opt-offset {} '{}'".format(operator, self.value)).encode('utf-8') @classmethod - def first_available_event(cls): + def earliest(cls): """ Get the beginning of the event stream. @@ -329,7 +329,7 @@ def first_available_event(cls): return cls("-1") @classmethod - def new_events_only(cls): + def latest(cls): """ Get the end of the event stream. From 72e420e98a4b226e480c20555589dc9703159db8 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 17 Jun 2019 17:27:59 -0700 Subject: [PATCH 10/54] Change EventSender's event_position to be mandatory --- .../eventhub/aio/event_hubs_client_async.py | 2 +- .../azure-eventhubs/azure/eventhub/client.py | 2 +- .../azure/eventhub/client_abstract.py | 2 +- sdk/eventhub/azure-eventhubs/conftest.py | 2 +- .../azure-eventhubs/examples/iothub_recv.py | 4 +- sdk/eventhub/azure-eventhubs/examples/recv.py | 2 +- .../azure-eventhubs/examples/recv_batch.py | 2 +- .../azure-eventhubs/examples/recv_epoch.py | 4 +- .../examples/test_examples_eventhub.py | 2 +- .../tests/asynctests/test_auth_async.py | 2 +- .../asynctests/test_iothub_receive_async.py | 4 +- .../tests/asynctests/test_negative_async.py | 10 ++-- .../tests/asynctests/test_receive_async.py | 52 +++++++++---------- .../azure-eventhubs/tests/test_auth.py | 2 +- .../tests/test_iothub_receive.py | 4 +- .../azure-eventhubs/tests/test_negative.py | 10 ++-- .../azure-eventhubs/tests/test_receive.py | 6 +-- 17 files changed, 55 insertions(+), 57 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py index 2a006373fccd..68de439d7828 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py @@ -176,7 +176,7 @@ async def get_partition_properties(self, partition): await mgmt_client.close_async() def create_receiver( - self, partition_id, consumer_group="$Default", event_position=EventPosition.first_available_event(), exclusive_receiver_priority=None, + self, partition_id, event_position, consumer_group="$Default", exclusive_receiver_priority=None, operation=None, prefetch=None, loop=None): """ Create an async receiver to the client for a particular consumer group and partition. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index f77dccf19203..ae20fb00aa38 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -188,7 +188,7 @@ def get_partition_properties(self, partition): mgmt_client.close() def create_receiver( - self, partition_id, consumer_group="$Default", event_position=EventPosition.first_available_event(), + self, partition_id, event_position, consumer_group="$Default", exclusive_receiver_priority=None, operation=None, prefetch=None, ): # type: (str, str, EventPosition, int, str, int) -> EventReceiver diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 5534a848c640..15acb45e8057 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -316,7 +316,7 @@ def _process_redirect_uri(self, redirect): @abstractmethod def create_receiver( - self, partition_id, consumer_group="$Default", event_position=None, exclusive_receiver_priority=None, + self, 
partition_id, event_position, consumer_group="$Default", exclusive_receiver_priority=None, operation=None, prefetch=None, ): diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index a23dc86be6a8..ef78eb573292 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -176,7 +176,7 @@ def connstr_receivers(connection_str): partitions = client.get_partition_ids() receivers = [] for p in partitions: - receiver = client.create_receiver(partition_id=p, prefetch=500, event_position=EventPosition("-1")) + receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1"), prefetch=500) receiver._open() receivers.append(receiver) yield connection_str, receivers diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index 7961abef5c0a..7b2fc9ed5dcc 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -11,7 +11,7 @@ import os import logging -from azure.eventhub import EventHubClient +from azure.eventhub import EventHubClient, EventPosition logger = logging.getLogger('azure.eventhub') @@ -19,7 +19,7 @@ iot_connection_str = 'HostName=iothubfortrack2py.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=glF9a2n0D9fgmWpfTqjjmvkYt0WaTNqZx9GV/UKwDkQ=' # os.environ['IOTHUB_CONNECTION_STR'] client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) -receiver = client.create_receiver(partition_id="0", operation='/messages/events') +receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') with receiver: received = receiver.receive(timeout=5) print(received) diff --git a/sdk/eventhub/azure-eventhubs/examples/recv.py b/sdk/eventhub/azure-eventhubs/examples/recv.py index 0e85dcb5fb39..a2ca9a32e6ee 100644 --- 
a/sdk/eventhub/azure-eventhubs/examples/recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv.py @@ -33,7 +33,7 @@ network_tracing=False) try: - receiver = client.create_receiver(partition_id=PARTITION, prefetch=5000, event_position=EVENT_POSITION) + receiver = client.create_receiver(partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=5000) with receiver: start_time = time.time() batch = receiver.receive(timeout=5000) diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py index 0b270454828f..da451343d564 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py @@ -34,7 +34,7 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) try: - receiver = client.create_receiver(partition_id=PARTITION, prefetch=100, event_position=EVENT_POSITION) + receiver = client.create_receiver(partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=100) with receiver: batched_events = receiver.receive(max_batch_size=10) for event_data in batched_events: diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py index 3f82202dbd5b..c0cc9b341174 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py @@ -15,7 +15,7 @@ import asyncio from azure.eventhub.aio import EventHubClient -from azure.eventhub import EventHubSharedKeyCredential +from azure.eventhub import EventHubSharedKeyCredential, EventPosition import examples logger = examples.get_logger(logging.INFO) @@ -31,7 +31,7 @@ async def pump(client, exclusive_receiver_priority): - receiver = client.create_receiver(partition_id=PARTITION, exclusive_receiver_priority=exclusive_receiver_priority) + receiver = client.create_receiver(partition_id=PARTITION, 
event_position=EventPosition("-1"), exclusive_receiver_priority=exclusive_receiver_priority) async with receiver: total = 0 start_time = time.time() diff --git a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py index fd1c1ba1773e..2c70c735d4d3 100644 --- a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py +++ b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py @@ -68,7 +68,7 @@ def test_example_eventhub_sync_send_and_receive(live_eventhub_config): # Create a receiver. receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) # Create an exclusive receiver object. - exclusive_receiver = client.create_receiver(partition_id="0", exclusive_receiver_priority=1) + exclusive_receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=1) # [END create_eventhub_client_receiver] client = EventHubClient.from_connection_string(connection_str) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py index 4799df1a8634..5d135ef54506 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py @@ -27,7 +27,7 @@ async def test_client_secret_credential_async(aad_credential, live_eventhub): credential=credential, user_agent='customized information') sender = client.create_sender(partition_id='0') - receiver = client.create_receiver(partition_id='0', event_position=EventPosition.new_events_only()) + receiver = client.create_receiver(partition_id='0', event_position=EventPosition.latest()) async with receiver: diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index 
c7ebd17a9fbc..2456525f344a 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -25,7 +25,7 @@ async def pump(receiver, sleep=None): async def get_partitions(iot_connection_str): client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) - receiver = client.create_receiver(partition_id="0", prefetch=1000, operation='/messages/events') + receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=1000, operation='/messages/events') async with receiver: partitions = await client.get_properties() return partitions["partition_ids"] @@ -39,7 +39,7 @@ async def test_iothub_receive_multiple_async(iot_connection_str): client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) receivers = [] for p in partitions: - receivers.append(client.create_receiver(partition_id=p, prefetch=10, operation='/messages/events')) + receivers.append(client.create_receiver(partition_id=p, event_position=EventPosition("-1"), prefetch=10, operation='/messages/events')) outputs = await asyncio.gather(*[pump(r) for r in receivers]) assert isinstance(outputs[0], int) and outputs[0] <= 10 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py index 75f168027644..b7443f4597b9 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py @@ -36,7 +36,7 @@ async def test_send_with_invalid_hostname_async(invalid_hostname, connstr_receiv @pytest.mark.asyncio async def test_receive_with_invalid_hostname_async(invalid_hostname): client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=True) - sender = client.create_receiver(partition_id="0") + sender = 
client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -55,7 +55,7 @@ async def test_send_with_invalid_key_async(invalid_key, connstr_receivers): @pytest.mark.asyncio async def test_receive_with_invalid_key_async(invalid_key): client = EventHubClient.from_connection_string(invalid_key, network_tracing=True) - sender = client.create_receiver(partition_id="0") + sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -74,7 +74,7 @@ async def test_send_with_invalid_policy_async(invalid_policy, connstr_receivers) @pytest.mark.asyncio async def test_receive_with_invalid_policy_async(invalid_policy): client = EventHubClient.from_connection_string(invalid_policy, network_tracing=True) - sender = client.create_receiver(partition_id="0") + sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -107,7 +107,7 @@ async def test_non_existing_entity_sender_async(connection_str): @pytest.mark.asyncio async def test_non_existing_entity_receiver_async(connection_str): client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) - receiver = client.create_receiver(partition_id="0") + receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await receiver._open() @@ -118,7 +118,7 @@ async def test_receive_from_invalid_partitions_async(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: client = EventHubClient.from_connection_string(connection_str, network_tracing=True) - receiver = client.create_receiver(partition_id=p) + receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1")) try: with pytest.raises(ConnectError): await 
receiver.receive(timeout=10) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 8f8457904af6..1fc79d7ca17a 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -147,7 +147,7 @@ async def test_receive_with_inclusive_sequence_no_async(connstr_senders): async def test_receive_batch_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -179,8 +179,8 @@ async def test_exclusive_receiver_async(connstr_senders): senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver1 = client.create_receiver(partition_id="0", exclusive_receiver_priority=10, prefetch=5) - receiver2 = client.create_receiver(partition_id="0", exclusive_receiver_priority=20, prefetch=10) + receiver1 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=10, prefetch=5) + receiver2 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=20, prefetch=10) try: await pump(receiver1) output2 = await pump(receiver2) @@ -203,7 +203,7 @@ async def test_multiple_receiver_async(connstr_senders): assert partitions["partition_ids"] == ["0", "1"] receivers = [] for i in range(2): - receivers.append(client.create_receiver(partition_id="0", prefetch=10)) + 
receivers.append(client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=10)) try: more_partitions = await client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] @@ -224,8 +224,8 @@ async def test_exclusive_receiver_after_non_exclusive_receiver_async(connstr_sen senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver1 = client.create_receiver(partition_id="0", prefetch=10) - receiver2 = client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10) + receiver1 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=10) + receiver2 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=15, prefetch=10) try: await pump(receiver1) output2 = await pump(receiver2) @@ -244,8 +244,8 @@ async def test_non_exclusive_receiver_after_exclusive_receiver_async(connstr_sen senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver1 = client.create_receiver(partition_id="0", exclusive_receiver_priority=15, prefetch=10) - receiver2 = client.create_receiver(partition_id="0", prefetch=10) + receiver1 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=15, prefetch=10) + receiver2 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=10) try: output1 = await pump(receiver1) with pytest.raises(ConnectError): @@ -275,18 +275,17 @@ def batched(): yield ed client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) - - received = await receiver.receive(timeout=5) - assert len(received) == 0 + receiver = 
client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) + async with receiver: + received = await receiver.receive(timeout=5) + assert len(received) == 0 - senders[0].send(batched()) + senders[0].send(batched()) - await asyncio.sleep(1) + await asyncio.sleep(1) - received = await receiver.receive(max_batch_size=15, timeout=5) - assert len(received) == 15 - await receiver.close() + received = await receiver.receive(max_batch_size=15, timeout=5) + assert len(received) == 15 for index, message in enumerate(received): assert list(message.body)[0] == "Event Data {}".format(index).encode('utf-8') @@ -299,21 +298,20 @@ def batched(): async def test_receive_over_websocket_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) - receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) event_list = [] for i in range(20): event_list.append(EventData("Event Number {}".format(i))) - received = await receiver.receive(timeout=5) - assert len(received) == 0 - - with senders[0]: - senders[0].send(event_list) + async with receiver: + received = await receiver.receive(timeout=5) + assert len(received) == 0 - time.sleep(1) + with senders[0]: + senders[0].send(event_list) - received = await receiver.receive(max_batch_size=50, timeout=5) - assert len(received) == 20 + time.sleep(1) - await receiver.close() + received = await receiver.receive(max_batch_size=50, timeout=5) + assert len(received) == 20 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_auth.py b/sdk/eventhub/azure-eventhubs/tests/test_auth.py index aa728d2d54de..c4f28f1fe0ac 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_auth.py +++ 
b/sdk/eventhub/azure-eventhubs/tests/test_auth.py @@ -23,7 +23,7 @@ def test_client_secret_credential(aad_credential, live_eventhub): credential=credential, user_agent='customized information') sender = client.create_sender(partition_id='0') - receiver = client.create_receiver(partition_id='0', event_position=EventPosition.new_events_only()) + receiver = client.create_receiver(partition_id='0', event_position=EventPosition.latest()) with receiver: received = receiver.receive(timeout=1) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index 82add37ef868..9ece98d9a3bc 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -8,14 +8,14 @@ import pytest import time -from azure.eventhub import EventData, EventHubClient +from azure.eventhub import EventData, EventPosition, EventHubClient @pytest.mark.liveTest def test_iothub_receive_sync(iot_connection_str, device_id): pytest.skip("current code will cause ErrorCodes.LinkRedirect") client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) - receiver = client.create_receiver(partition_id="0", operation='/messages/events') + receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') receiver._open() try: partitions = client.get_properties() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_negative.py b/sdk/eventhub/azure-eventhubs/tests/test_negative.py index 7111840995f9..b88f1c15eb6e 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_negative.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_negative.py @@ -32,7 +32,7 @@ def test_send_with_invalid_hostname(invalid_hostname, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_hostname_sync(invalid_hostname): client = EventHubClient.from_connection_string(invalid_hostname, 
network_tracing=True) - receiver = client.create_receiver(partition_id="0") + receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -49,7 +49,7 @@ def test_send_with_invalid_key(invalid_key, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_key_sync(invalid_key): client = EventHubClient.from_connection_string(invalid_key, network_tracing=True) - receiver = client.create_receiver(partition_id="0") + receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -66,7 +66,7 @@ def test_send_with_invalid_policy(invalid_policy, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_policy_sync(invalid_policy): client = EventHubClient.from_connection_string(invalid_policy, network_tracing=True) - receiver = client.create_receiver(partition_id="0") + receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -96,7 +96,7 @@ def test_non_existing_entity_sender(connection_str): @pytest.mark.liveTest def test_non_existing_entity_receiver(connection_str): client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) - receiver = client.create_receiver(partition_id="0") + receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -106,7 +106,7 @@ def test_receive_from_invalid_partitions_sync(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: client = EventHubClient.from_connection_string(connection_str, network_tracing=True) - receiver = client.create_receiver(partition_id=p) + receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1")) try: with pytest.raises(ConnectError): 
receiver.receive(timeout=10) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index c32d748a6ce0..c89d9bc97a77 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py @@ -205,7 +205,7 @@ def test_receive_with_inclusive_sequence_no(connstr_senders): def test_receive_batch(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -234,7 +234,7 @@ def batched(): yield ed client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -256,7 +256,7 @@ def batched(): def test_receive_over_websocket_sync(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) - receiver = client.create_receiver(partition_id="0", prefetch=500, event_position=EventPosition('@latest')) + receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) event_list = [] for i in range(20): From f353da890bb980df6bab9a975e3383302aecc2ff Mon Sep 17 00:00:00 2001 From: yijxie Date: Tue, 18 Jun 2019 10:47:54 -0700 Subject: [PATCH 11/54] Update uamqp shared_req to 1.2.0 --- 
shared_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared_requirements.txt b/shared_requirements.txt index fdde4e1967c7..4eb03dbe361b 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -89,7 +89,7 @@ typing msrest>=0.5.0 msrestazure<2.0.0,>=0.4.32 requests>=2.18.4 -uamqp~=1.1.0 +uamqp~=1.2.0 enum34>=1.0.4 certifi>=2017.4.17 aiohttp>=3.0 From 4df0aaf1546f238452c48bef42e037888d5495c9 Mon Sep 17 00:00:00 2001 From: yijxie Date: Tue, 18 Jun 2019 10:50:27 -0700 Subject: [PATCH 12/54] Disable network_tracing --- sdk/eventhub/azure-eventhubs/conftest.py | 4 ++-- .../azure-eventhubs/examples/iothub_recv.py | 2 +- .../azure-eventhubs/examples/iothub_send.py | 2 +- .../azure-eventhubs/examples/iterator_receiver.py | 2 +- .../tests/asynctests/test_iothub_receive_async.py | 4 ++-- .../asynctests/test_longrunning_send_async.py | 2 +- .../tests/asynctests/test_negative_async.py | 14 +++++++------- .../tests/asynctests/test_receive_async.py | 2 +- .../tests/asynctests/test_reconnect_async.py | 4 ++-- .../azure-eventhubs/tests/test_iothub_receive.py | 2 +- .../azure-eventhubs/tests/test_iothub_send.py | 2 +- .../azure-eventhubs/tests/test_negative.py | 12 ++++++------ sdk/eventhub/azure-eventhubs/tests/test_receive.py | 2 +- .../azure-eventhubs/tests/test_reconnect.py | 4 ++-- sdk/eventhub/azure-eventhubs/tests/test_send.py | 2 +- 15 files changed, 30 insertions(+), 30 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index ef78eb573292..9b13fa2a61f6 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -172,7 +172,7 @@ def aad_credential(): @pytest.fixture() def connstr_receivers(connection_str): - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) partitions = client.get_partition_ids() 
receivers = [] for p in partitions: @@ -187,7 +187,7 @@ def connstr_receivers(connection_str): @pytest.fixture() def connstr_senders(connection_str): - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) partitions = client.get_partition_ids() senders = [] diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index 7b2fc9ed5dcc..4ce8771425c6 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -18,7 +18,7 @@ iot_connection_str = 'HostName=iothubfortrack2py.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=glF9a2n0D9fgmWpfTqjjmvkYt0WaTNqZx9GV/UKwDkQ=' # os.environ['IOTHUB_CONNECTION_STR'] -client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) +client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') with receiver: received = receiver.receive(timeout=5) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py index ab4fcb2adec6..255deb6be516 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py @@ -19,7 +19,7 @@ iot_device_id = os.environ['IOTHUB_DEVICE'] iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] -client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) +client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) try: sender = client.create_sender(operation='/messages/devicebound') with sender: diff --git a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py 
b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py index a8dd0a4b4400..4a22149139b5 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py +++ b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py @@ -36,7 +36,7 @@ def run(self): client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), - network_tracing=True) + network_tracing=False) receiver = client.create_receiver(partition_id="0", event_position=EVENT_POSITION) with receiver: thread = PartitionReceiverThread(receiver) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index 2456525f344a..ceb3b05e5033 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -24,7 +24,7 @@ async def pump(receiver, sleep=None): async def get_partitions(iot_connection_str): - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=1000, operation='/messages/events') async with receiver: partitions = await client.get_properties() @@ -36,7 +36,7 @@ async def get_partitions(iot_connection_str): async def test_iothub_receive_multiple_async(iot_connection_str): pytest.skip("This will get AuthenticationError. 
We're investigating...") partitions = await get_partitions(iot_connection_str) - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) receivers = [] for p in partitions: receivers.append(client.create_receiver(partition_id=p, event_position=EventPosition("-1"), prefetch=10, operation='/messages/events')) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py index 0cf4c8044f54..30b55af5f6ef 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py @@ -89,7 +89,7 @@ async def test_long_running_partition_send_async(connection_str): if args.conn_str: client = EventHubClient.from_connection_string( args.conn_str, - event_hub_path=args.eventhub, network_tracing=True) + event_hub_path=args.eventhub, network_tracing=False) elif args.address: client = EventHubClient(host=args.address, event_hub_path=args.eventhub, diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py index b7443f4597b9..d55d71585b59 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py @@ -26,7 +26,7 @@ @pytest.mark.asyncio async def test_send_with_invalid_hostname_async(invalid_hostname, connstr_receivers): _, receivers = connstr_receivers - client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=True) + client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) sender = client.create_sender() with pytest.raises(AuthenticationError): await sender._open() @@ -35,7 +35,7 @@ async def 
test_send_with_invalid_hostname_async(invalid_hostname, connstr_receiv @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_invalid_hostname_async(invalid_hostname): - client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=True) + client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -54,7 +54,7 @@ async def test_send_with_invalid_key_async(invalid_key, connstr_receivers): @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_invalid_key_async(invalid_key): - client = EventHubClient.from_connection_string(invalid_key, network_tracing=True) + client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -73,7 +73,7 @@ async def test_send_with_invalid_policy_async(invalid_policy, connstr_receivers) @pytest.mark.liveTest @pytest.mark.asyncio async def test_receive_with_invalid_policy_async(invalid_policy): - client = EventHubClient.from_connection_string(invalid_policy, network_tracing=True) + client = EventHubClient.from_connection_string(invalid_policy, network_tracing=False) sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -83,7 +83,7 @@ async def test_receive_with_invalid_policy_async(invalid_policy): @pytest.mark.asyncio async def test_send_partition_key_with_partition_async(connection_str): pytest.skip("Skipped tentatively. 
Confirm whether to throw ValueError or just warn users") - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender(partition_id="1") try: data = EventData(b"Data") @@ -117,7 +117,7 @@ async def test_non_existing_entity_receiver_async(connection_str): async def test_receive_from_invalid_partitions_async(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1")) try: with pytest.raises(ConnectError): @@ -184,7 +184,7 @@ async def pump(receiver): @pytest.mark.asyncio async def test_max_receivers_async(connstr_senders): connection_str, senders = connstr_senders - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receivers = [] for i in range(6): receivers.append(client.create_receiver(partition_id="0", prefetch=1000, event_position=EventPosition('@latest'))) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 1fc79d7ca17a..55822c9f89d1 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -198,7 +198,7 @@ async def test_multiple_receiver_async(connstr_senders): connection_str, senders = connstr_senders senders[0].send(EventData(b"Receiving only a single event")) - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, 
network_tracing=False) partitions = await client.get_properties() assert partitions["partition_ids"] == ["0", "1"] receivers = [] diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index e7a4fcad61f0..f7a39c6503aa 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -21,7 +21,7 @@ @pytest.mark.asyncio async def test_send_with_long_interval_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() try: await sender.send(EventData(b"A single event")) @@ -58,7 +58,7 @@ def pump(receiver): @pytest.mark.asyncio async def test_send_with_forced_conn_close_async(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() try: await sender.send(EventData(b"A single event")) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index 9ece98d9a3bc..372ada0bf50d 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -14,7 +14,7 @@ @pytest.mark.liveTest def test_iothub_receive_sync(iot_connection_str, device_id): pytest.skip("current code will cause ErrorCodes.LinkRedirect") - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) receiver = 
client.create_receiver(partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') receiver._open() try: diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py index 75a39eb185bf..bc2f92d14cdf 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py @@ -16,7 +16,7 @@ @pytest.mark.liveTest def test_iothub_send_single_event(iot_connection_str, device_id): - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=True) + client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) sender = client.create_sender(operation='/messages/devicebound') try: sender.send(EventData(b"A single event", to_device=device_id)) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_negative.py b/sdk/eventhub/azure-eventhubs/tests/test_negative.py index b88f1c15eb6e..87f23d029aaf 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_negative.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_negative.py @@ -31,7 +31,7 @@ def test_send_with_invalid_hostname(invalid_hostname, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_hostname_sync(invalid_hostname): - client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=True) + client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -48,7 +48,7 @@ def test_send_with_invalid_key(invalid_key, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_key_sync(invalid_key): - client = EventHubClient.from_connection_string(invalid_key, network_tracing=True) + client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) receiver = 
client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -65,7 +65,7 @@ def test_send_with_invalid_policy(invalid_policy, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_policy_sync(invalid_policy): - client = EventHubClient.from_connection_string(invalid_policy, network_tracing=True) + client = EventHubClient.from_connection_string(invalid_policy, network_tracing=False) receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -74,7 +74,7 @@ def test_receive_with_invalid_policy_sync(invalid_policy): @pytest.mark.liveTest def test_send_partition_key_with_partition_sync(connection_str): pytest.skip("Skipped tentatively. Confirm whether to throw ValueError or just warn users") - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender(partition_id="1") try: data = EventData(b"Data") @@ -105,7 +105,7 @@ def test_non_existing_entity_receiver(connection_str): def test_receive_from_invalid_partitions_sync(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1")) try: with pytest.raises(ConnectError): @@ -131,7 +131,7 @@ def test_send_to_invalid_partitions(connection_str): def test_send_too_large_message(connection_str): if sys.platform.startswith('darwin'): pytest.skip("Skipping on OSX - open issue regarding message size") - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = 
EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() try: data = EventData(b"A" * 1100000) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index c89d9bc97a77..10cf1575cbb8 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py @@ -14,7 +14,7 @@ # def test_receive_without_events(connstr_senders): # connection_str, senders = connstr_senders -# client = EventHubClient.from_connection_string(connection_str, network_tracing=True) +# client = EventHubClient.from_connection_string(connection_str, network_tracing=False) # receiver = client.create_receiver("$default", "0", event_position=EventPosition('@latest')) # finish = datetime.datetime.now() + datetime.timedelta(seconds=240) # count = 0 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index d65f774108e0..ef81d72782f5 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -19,7 +19,7 @@ @pytest.mark.liveTest def test_send_with_long_interval_sync(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: sender.send(EventData(b"A single event")) @@ -43,7 +43,7 @@ def test_send_with_long_interval_sync(connstr_receivers): @pytest.mark.liveTest def test_send_with_forced_conn_close_sync(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: 
sender.send(EventData(b"A single event")) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index 5a497f34dcca..efec47dcf140 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -165,7 +165,7 @@ def batched(): @pytest.mark.liveTest def test_send_array_sync(connstr_receivers): connection_str, receivers = connstr_receivers - client = EventHubClient.from_connection_string(connection_str, network_tracing=True) + client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_sender() with sender: sender.send(EventData([b"A", b"B", b"C"])) From a8c7a50fd498e02d71bb80303c8a3f22a6685958 Mon Sep 17 00:00:00 2001 From: yijxie Date: Tue, 18 Jun 2019 11:31:05 -0700 Subject: [PATCH 13/54] update uamqp dependency ~=1.2.0 --- sdk/servicebus/azure-servicebus/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/servicebus/azure-servicebus/setup.py b/sdk/servicebus/azure-servicebus/setup.py index c908d609c570..606cf21fe23b 100644 --- a/sdk/servicebus/azure-servicebus/setup.py +++ b/sdk/servicebus/azure-servicebus/setup.py @@ -77,7 +77,7 @@ 'azure', ]), install_requires=[ - 'uamqp~=1.1.0', + 'uamqp~=1.2.0', 'msrestazure>=0.4.32,<2.0.0', 'azure-common~=1.1', ], From daf3d713738f2b62701164fa8ae12a0a1a50b59d Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 19 Jun 2019 10:39:23 -0700 Subject: [PATCH 14/54] Remove EventPosition helper functions --- .../azure-eventhubs/azure/eventhub/common.py | 62 ------------------- 1 file changed, 62 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 5beba6e4593f..df526d83da93 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -318,68 +318,6 @@ def _selector(self): return 
("amqp.annotation.x-opt-sequence-number {} '{}'".format(operator, self.value)).encode('utf-8') return ("amqp.annotation.x-opt-offset {} '{}'".format(operator, self.value)).encode('utf-8') - @classmethod - def earliest(cls): - """ - Get the beginning of the event stream. - - :rtype: azure.eventhub.common.EventPosition - """ - - return cls("-1") - - @classmethod - def latest(cls): - """ - Get the end of the event stream. - - :rtype: azure.eventhub.common.EventPosition - """ - - return cls("@latest") - - @classmethod - def from_offset(cls, offset, inclusive=False): - """ - Get the event position from/after the specified offset. - - :param offset: the offset value - :type offset: str - :param inclusive: Whether to include the supplied value as the start point. - :type inclusive: bool - :rtype: azure.eventhub.common.EventPosition - """ - - return cls(offset, inclusive) - - @classmethod - def from_sequence(cls, sequence, inclusive=False): - """ - Get the event position from/after the specified sequence number. - - :param sequence: the sequence number - :type sequence: int, long - :param inclusive: Whether to include the supplied value as the start point. - :type inclusive: bool - :rtype: azure.eventhub.common.EventPosition - """ - - return cls(sequence, inclusive) - - @classmethod - def from_enqueued_time(cls, enqueued_time, inclusive=False): - """ - Get the event position from/after the specified enqueue time. - - :param enqueued_time: the enqueue datetime - :type enqueued_time: datetime.datetime - :param inclusive: Whether to include the supplied value as the start point. - :type inclusive: bool - :rtype: azure.eventhub.common.EventPosition - """ - - return cls(enqueued_time, inclusive) - # TODO: move some behaviors to these two classes. 
class EventHubSASTokenCredential(object): From fc0ce067eb544fe2d7d7d06893ea72335015dfeb Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 19 Jun 2019 11:09:20 -0700 Subject: [PATCH 15/54] Names changed to EventHubConsumer/Producer --- sdk/eventhub/azure-eventhubs/HISTORY.md | 14 +-- sdk/eventhub/azure-eventhubs/README.md | 14 +-- .../azure/eventhub/__init__.py | 10 +- .../azure/eventhub/aio/__init__.py | 8 +- .../eventhub/aio/event_hubs_client_async.py | 32 +++---- .../azure/eventhub/aio/receiver_async.py | 89 +++++++++-------- .../azure/eventhub/aio/sender_async.py | 55 ++++++----- .../azure-eventhubs/azure/eventhub/client.py | 40 ++++---- .../azure/eventhub/client_abstract.py | 6 +- .../azure/eventhub/receiver.py | 95 ++++++++++--------- .../azure-eventhubs/azure/eventhub/sender.py | 57 ++++++----- .../eventprocessorhost/eh_partition_pump.py | 2 +- sdk/eventhub/azure-eventhubs/conftest.py | 6 +- .../async_examples/iterator_receiver_async.py | 2 +- .../examples/async_examples/recv_async.py | 2 +- .../examples/async_examples/send_async.py | 2 +- .../test_examples_eventhub_async.py | 14 +-- .../azure-eventhubs/examples/batch_send.py | 2 +- .../examples/client_secret_auth.py | 2 +- .../azure-eventhubs/examples/iothub_recv.py | 2 +- .../azure-eventhubs/examples/iothub_send.py | 2 +- .../examples/iterator_receiver.py | 2 +- .../azure-eventhubs/examples/proxy.py | 4 +- sdk/eventhub/azure-eventhubs/examples/recv.py | 2 +- .../azure-eventhubs/examples/recv_batch.py | 2 +- .../azure-eventhubs/examples/recv_epoch.py | 4 +- sdk/eventhub/azure-eventhubs/examples/send.py | 2 +- .../examples/test_examples_eventhub.py | 14 +-- .../tests/asynctests/test_auth_async.py | 4 +- .../asynctests/test_iothub_receive_async.py | 4 +- .../tests/asynctests/test_longrunning_eph.py | 2 +- .../test_longrunning_eph_with_context.py | 2 +- .../test_longrunning_receive_async.py | 2 +- .../asynctests/test_longrunning_send_async.py | 4 +- .../tests/asynctests/test_negative_async.py | 31 +++--- 
.../tests/asynctests/test_receive_async.py | 42 ++++---- .../test_receiver_iterator_async.py | 2 +- .../tests/asynctests/test_reconnect_async.py | 6 +- .../tests/asynctests/test_send_async.py | 24 ++--- .../azure-eventhubs/tests/test_auth.py | 4 +- .../tests/test_iothub_receive.py | 2 +- .../azure-eventhubs/tests/test_iothub_send.py | 2 +- .../tests/test_longrunning_receive.py | 4 +- .../tests/test_longrunning_send.py | 4 +- .../azure-eventhubs/tests/test_negative.py | 28 +++--- .../azure-eventhubs/tests/test_receive.py | 32 +++---- .../tests/test_receiver_iterator.py | 2 +- .../azure-eventhubs/tests/test_reconnect.py | 6 +- .../azure-eventhubs/tests/test_send.py | 26 ++--- 49 files changed, 377 insertions(+), 342 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/HISTORY.md b/sdk/eventhub/azure-eventhubs/HISTORY.md index 6103bbefdddb..d49df4d13950 100644 --- a/sdk/eventhub/azure-eventhubs/HISTORY.md +++ b/sdk/eventhub/azure-eventhubs/HISTORY.md @@ -1,6 +1,6 @@ # Release History -## 2.0.0-preview.1 (2019-06-17) +## 2.0.0b1 (2019-06-25) - Added more configuration parameters when creating EventHubClient. - New error hierarchy @@ -10,13 +10,13 @@ - `azure.error.AuthenticationError` - `azure.error.EventDataError` - `azure.error.EventDataSendError` -- Renamed Sender/Receiver to EventSender/EventReceiver - - New APIs for creating EventSender/EventReceiver. - - EventReceiver is now iterable. +- Renamed Sender/Receiver to EventHubProducer/EventHubConsumer + - New APIs for creating EventHubProducer/EventHubConsumer. + - EventHubConsumer is now iterable. - Rename class azure.eventhub.Offset to azure.eventhub.EventPosition -- Reorganized connection management, EventHubClient is no longer responsible for opening/closing EventSender/EventReceiver. - - Each EventSender/EventReceiver is responsible for its own connection management. - - Added support for context manager on EventSender and EventReceiver. 
+- Reorganized connection management, EventHubClient is no longer responsible for opening/closing EventHubProducer/EventHubConsumer. + - Each EventHubProducer/EventHubConsumer is responsible for its own connection management. + - Added support for context manager on EventHubProducer and EventHubConsumer. - Reorganized async APIs into "azure.eventhub.aio" namespace and rename to drop the "_async" suffix. - Added support for authentication using azure-core credential. - Added support for transport using AMQP over WebSocket. diff --git a/sdk/eventhub/azure-eventhubs/README.md b/sdk/eventhub/azure-eventhubs/README.md index 87e1bf6e25fc..a01c2bd53f2d 100644 --- a/sdk/eventhub/azure-eventhubs/README.md +++ b/sdk/eventhub/azure-eventhubs/README.md @@ -4,12 +4,12 @@ Azure Event Hubs is a big data streaming platform and event ingestion service. I Use the Event Hubs client library for Python to: -- Publish events to the Event Hubs service through a sender. -- Read events from the Event Hubs service through a receiver. +- Publish events to the Event Hubs service through a producer. +- Read events from the Event Hubs service through a consumer. On Python 3.5.3 and above, it also includes: -- An async sender and receiver that supports async/await methods. +- An async producer and consumer that supports async/await methods. - An Event Processor Host module that manages the distribution of partition readers. 
[Source code](https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/eventhub/azure-eventhubs) | [Package (PyPi)](https://pypi.org/project/azure-eventhub/) | [API reference documentation](https://docs.microsoft.com/python/api/azure-eventhub) | [Product documentation](https://docs.microsoft.com/en-ca/azure/event-hubs/) @@ -89,7 +89,7 @@ connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};En os.environ['EVENT_HUB_SAS_KEY'], os.environ['EVENT_HUB_NAME']) client = EventHubClient.from_connection_string(connection_str) -sender = client.create_sender(partition_id="0") +sender = client.create_producer(partition_id="0") try: event_list = [] @@ -119,7 +119,7 @@ connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};En os.environ['EVENT_HUB_SAS_KEY'], os.environ['EVENT_HUB_NAME']) client = EventHubClient.from_connection_string(connection_str) -receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition.new_events_only()) +receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition.new_events_only()) try: logger = logging.getLogger("azure.eventhub") @@ -148,7 +148,7 @@ connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};En os.environ['EVENT_HUB_SAS_KEY'], os.environ['EVENT_HUB_NAME']) client = EventHubClient.from_connection_string(connection_str) -sender = client.create_sender(partition_id="0") +sender = client.create_producer(partition_id="0") try: event_list = [] @@ -179,7 +179,7 @@ connection_str = "Endpoint=sb://{}/;SharedAccessKeyName={};SharedAccessKey={};En os.environ['EVENT_HUB_SAS_KEY'], os.environ['EVENT_HUB_NAME']) client = EventHubClient.from_connection_string(connection_str) -receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition.new_events_only()) +receiver = client.create_consumer(consumer_group="$default", partition_id="0", 
event_position=EventPosition.new_events_only()) try: logger = logging.getLogger("azure.eventhub") diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index 4de864f577ce..e3eb7b2b5b38 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -3,14 +3,14 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -__version__ = "2.0.0-preview.1" +__version__ = "2.0.0b1" from azure.eventhub.common import EventData, EventPosition from azure.eventhub.error import EventHubError, EventDataError, ConnectError, \ AuthenticationError, EventDataSendError, ConnectionLostError from azure.eventhub.client import EventHubClient -from azure.eventhub.sender import EventSender -from azure.eventhub.receiver import EventReceiver +from azure.eventhub.sender import EventHubProducer +from azure.eventhub.receiver import EventHubConsumer from .constants import MessageSendResult from .constants import TransportType from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential @@ -26,8 +26,8 @@ "AuthenticationError", "EventPosition", "EventHubClient", - "EventSender", - "EventReceiver", + "EventHubProducer", + "EventHubConsumer", "MessageSendResult", "TransportType", "EventHubSharedKeyCredential", diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py index 88fd0673f4df..f0361bdc038b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py @@ -1,9 +1,9 @@ from .event_hubs_client_async import EventHubClient -from .receiver_async import EventReceiver -from .sender_async import EventSender +from .receiver_async import EventHubConsumer +from 
.sender_async import EventHubProducer __all__ = [ "EventHubClient", - "EventReceiver", - "EventSender" + "EventHubConsumer", + "EventHubProducer" ] diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py index 68de439d7828..a14ce27029b2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py @@ -20,8 +20,8 @@ EventHubError) from ..client_abstract import EventHubClientAbstract -from .sender_async import EventSender -from .receiver_async import EventReceiver +from .sender_async import EventHubProducer +from .receiver_async import EventHubConsumer log = logging.getLogger(__name__) @@ -80,7 +80,7 @@ def _create_auth(self, username=None, password=None): transport_type=transport_type) else: - get_jwt_token = functools.partial(self.credential.get_token, ['https://eventhubs.azure.net//.default']) + get_jwt_token = functools.partial(self.credential.get_token, 'https://eventhubs.azure.net//.default') return authentication.JWTTokenAsync(self.auth_uri, self.auth_uri, get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) @@ -175,28 +175,28 @@ async def get_partition_properties(self, partition): finally: await mgmt_client.close_async() - def create_receiver( - self, partition_id, event_position, consumer_group="$Default", exclusive_receiver_priority=None, + def create_consumer( + self, consumer_group, partition_id, event_position, owner_level=None, operation=None, prefetch=None, loop=None): """ Create an async receiver to the client for a particular consumer group and partition. - :param partition_id: The ID of the partition. - :type partition_id: str :param consumer_group: The name of the consumer group. Default value is `$Default`. :type consumer_group: str + :param partition_id: The ID of the partition. 
+ :type partition_id: str :param event_position: The position from which to start receiving. :type event_position: ~azure.eventhub.common.EventPosition - :param exclusive_receiver_priority: The priority of the exclusive receiver. The client will create an exclusive - receiver if exclusive_receiver_priority is set. - :type exclusive_receiver_priority: int + :param owner_level: The priority of the exclusive receiver. The client will create an exclusive + receiver if owner_level is set. + :type owner_level: int :param operation: An optional operation to be appended to the hostname in the source URL. The value must start with `/` character. :type operation: str :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int :param loop: An event loop. If not specified the default event loop will be used. - :rtype: ~azure.eventhub.aio.receiver_async.EventReceiver + :rtype: ~azure.eventhub.aio.receiver_async.EventHubConsumer Example: .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py @@ -212,12 +212,12 @@ def create_receiver( path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) - handler = EventReceiver( - self, source_url, event_position=event_position, exclusive_receiver_priority=exclusive_receiver_priority, + handler = EventHubConsumer( + self, source_url, event_position=event_position, owner_level=owner_level, prefetch=prefetch, loop=loop) return handler - def create_sender( + def create_producer( self, partition_id=None, operation=None, send_timeout=None, loop=None): """ Create an async sender to the client to send ~azure.eventhub.common.EventData object @@ -234,7 +234,7 @@ def create_sender( queued. Default value is 60 seconds. If set to 0, there will be no timeout. :type send_timeout: float :param loop: An event loop. 
If not specified the default event loop will be used. - :rtype ~azure.eventhub.aio.sender_async.EventSender + :rtype ~azure.eventhub.aio.sender_async.EventHubProducer Example: @@ -252,6 +252,6 @@ def create_sender( target = target + operation send_timeout = self.config.send_timeout if send_timeout is None else send_timeout - handler = EventSender( + handler = EventHubProducer( self, target, partition=partition_id, send_timeout=send_timeout, loop=loop) return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py index 6ee82725d2e2..00818195a479 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -16,16 +16,16 @@ log = logging.getLogger(__name__) -class EventReceiver(object): +class EventHubConsumer(object): """ - Implements the async API of a EventReceiver. + Implements the async API of a EventHubConsumer. """ timeout = 0 _epoch = b'com.microsoft:epoch' def __init__( # pylint: disable=super-init-not-called - self, client, source, event_position=None, prefetch=300, exclusive_receiver_priority=None, + self, client, source, event_position=None, prefetch=300, owner_level=None, keep_alive=None, auto_reconnect=True, loop=None): """ Instantiate an async receiver. @@ -39,9 +39,9 @@ def __init__( # pylint: disable=super-init-not-called :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param exclusive_receiver_priority: The priority of the exclusive receiver. It will an exclusive - receiver if exclusive_receiver_priority is set. - :type exclusive_receiver_priority: int + :param owner_level: The priority of the exclusive receiver. It will an exclusive + receiver if owner_level is set. + :type owner_level: int :param loop: An event loop. 
""" self.loop = loop or asyncio.get_event_loop() @@ -51,7 +51,7 @@ def __init__( # pylint: disable=super-init-not-called self.offset = event_position self.messages_iter = None self.prefetch = prefetch - self.exclusive_receiver_priority = exclusive_receiver_priority + self.owner_level = owner_level self.keep_alive = keep_alive self.auto_reconnect = auto_reconnect self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) @@ -64,8 +64,8 @@ def __init__( # pylint: disable=super-init-not-called source = Source(self.source) if self.offset is not None: source.set_filter(self.offset._selector()) # pylint: disable=protected-access - if exclusive_receiver_priority: - self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(exclusive_receiver_priority))} + if owner_level: + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(owner_level))} self._handler = ReceiveClientAsync( source, auth=self.client.get_auth(), @@ -103,46 +103,46 @@ async def __anext__(self): return event_data except errors.AuthenticationException as auth_error: if connecting_count < max_retries: - log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + log.info("EventHubConsumer disconnected due to token error. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver authentication failed. Shutting down.") + log.info("EventHubConsumer authentication failed. Shutting down.") error = AuthenticationError(str(auth_error), auth_error) await self.close(auth_error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. 
Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(error) raise error except errors.AMQPConnectionError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver connection lost. Attempting reconnect.") + log.info("EventHubConsumer connection lost. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver connection lost. Shutting down.") + log.info("EventHubConsumer connection lost. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(error) raise error except compat.TimeoutException as shutdown: if connecting_count < max_retries: - log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + log.info("EventHubConsumer timed out receiving event data. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver timed out. Shutting down.") + log.info("EventHubConsumer timed out. Shutting down.") await self.close(shutdown) raise TimeoutError(str(shutdown), shutdown) except StopAsyncIteration: @@ -159,7 +159,7 @@ def _check_closed(self): self.error) async def _open(self): """ - Open the EventReceiver using the supplied connection. + Open the EventHubConsumer using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. 
@@ -227,48 +227,57 @@ async def _build_connection(self, is_reconnect=False): # pylint: disable=too-ma return True except errors.AuthenticationException as shutdown: if is_reconnect: - log.info("EventReceiver couldn't authenticate. Shutting down. (%r)", shutdown) + log.info("EventHubConsumer couldn't authenticate. Shutting down. (%r)", shutdown) error = AuthenticationError(str(shutdown), shutdown) await self.close(exception=error) raise error else: - log.info("EventReceiver couldn't authenticate. Attempting reconnect.") + log.info("EventHubConsumer couldn't authenticate. Attempting reconnect.") return False except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") return False else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if is_reconnect: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error else: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") return False except errors.AMQPConnectionError as shutdown: if is_reconnect: - log.info("EventReceiver connection error (%r). Shutting down.", shutdown) + log.info("EventHubConsumer connection error (%r). Shutting down.", shutdown) error = AuthenticationError(str(shutdown), shutdown) await self.close(exception=error) raise error else: - log.info("EventReceiver couldn't authenticate. Attempting reconnect.") + log.info("EventHubConsumer couldn't authenticate. 
Attempting reconnect.") + return False + except compat.TimeoutException as shutdown: + if is_reconnect: + log.info("EventHubConsumer authentication timed out. Shutting down.") + error = AuthenticationError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventHubConsumer authentication timed out. Attempting reconnect.") return False except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventReceiver reconnect failed: {}".format(e)) + error = EventHubError("EventHubConsumer reconnect failed: {}".format(e)) await self.close(exception=error) raise error async def _reconnect(self): - """If the EventReceiver was disconnected from the service with + """If the EventHubConsumer was disconnected from the service with a retryable error - attempt to reconnect.""" return await self._build_connection(is_reconnect=True) @@ -366,46 +375,46 @@ async def receive(self, max_batch_size=None, timeout=None): return data_batch except errors.AuthenticationException as auth_error: if connecting_count < max_retries: - log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + log.info("EventHubConsumer disconnected due to token error. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver authentication failed. Shutting down.") + log.info("EventHubConsumer authentication failed. Shutting down.") error = AuthenticationError(str(auth_error), auth_error) await self.close(auth_error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. 
Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(error) raise error except errors.AMQPConnectionError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver connection lost. Attempting reconnect.") + log.info("EventHubConsumer connection lost. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver connection lost. Shutting down.") + log.info("EventHubConsumer connection lost. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(error) raise error except compat.TimeoutException as shutdown: if connecting_count < max_retries: - log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + log.info("EventHubConsumer timed out receiving event data. Attempting reconnect.") await self._reconnect() else: - log.info("EventReceiver timed out. Shutting down.") + log.info("EventHubConsumer timed out. Shutting down.") await self.close(shutdown) raise TimeoutError(str(shutdown), shutdown) except Exception as e: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py index f2d09a2df457..c526d17faf4a 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py @@ -19,9 +19,9 @@ log = logging.getLogger(__name__) -class EventSender(object): +class EventHubProducer(object): """ - Implements the async API of a EventSender. 
+ Implements the async API of a EventHubProducer. """ @@ -87,7 +87,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): async def _open(self): """ - Open the EventSender using the supplied connection. + Open the EventHubProducer using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. @@ -141,43 +141,52 @@ async def _build_connection(self, is_reconnect=False): return True except errors.AuthenticationException as shutdown: if is_reconnect: - log.info("EventSender couldn't authenticate. Shutting down. (%r)", shutdown) + log.info("EventHubProducer couldn't authenticate. Shutting down. (%r)", shutdown) error = AuthenticationError(str(shutdown), shutdown) await self.close(exception=error) raise error else: - log.info("EventSender couldn't authenticate. Attempting reconnect.") + log.info("EventHubProducer couldn't authenticate. Attempting reconnect.") return False except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry: - log.info("EventSender detached. Attempting reconnect.") + log.info("EventHubProducer detached. Attempting reconnect.") return False else: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if is_reconnect: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) await self.close(exception=error) raise error else: - log.info("EventSender detached. Attempting reconnect.") + log.info("EventHubProducer detached. Attempting reconnect.") return False except errors.AMQPConnectionError as shutdown: if is_reconnect: - log.info("EventSender connection error (%r). Shutting down.", shutdown) + log.info("EventHubProducer connection error (%r). 
Shutting down.", shutdown) error = AuthenticationError(str(shutdown), shutdown) await self.close(exception=error) raise error else: - log.info("EventSender couldn't authenticate. Attempting reconnect.") + log.info("EventHubProducer couldn't authenticate. Attempting reconnect.") + return False + except compat.TimeoutException as shutdown: + if is_reconnect: + log.info("EventHubProducer authentication timed out. Shutting down.") + error = AuthenticationError(str(shutdown), shutdown) + await self.close(exception=error) + raise error + else: + log.info("EventHubProducer authentication timed out. Attempting reconnect.") return False except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventSender Reconnect failed: {}".format(e)) + error = EventHubError("EventHubProducer Reconnect failed: {}".format(e)) await self.close(exception=error) raise error @@ -230,7 +239,7 @@ async def _send_event_data(self): await self._handler.wait_async() self.unsent_events = self._handler.pending_messages if self._outcome != constants.MessageSendResult.Ok: - EventSender._error(self._outcome, self._condition) + EventHubProducer._error(self._outcome, self._condition) return except (errors.MessageAccepted, errors.MessageAlreadySettled, @@ -246,46 +255,46 @@ async def _send_event_data(self): raise error except errors.AuthenticationException as auth_error: if connecting_count < max_retries: - log.info("EventSender disconnected due to token error. Attempting reconnect.") + log.info("EventHubProducer disconnected due to token error. Attempting reconnect.") await self._reconnect() else: - log.info("EventSender authentication failed. Shutting down.") + log.info("EventHubProducer authentication failed. Shutting down.") error = AuthenticationError(str(auth_error), auth_error) await self.close(auth_error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry: - log.info("EventSender detached. 
Attempting reconnect.") + log.info("EventHubProducer detached. Attempting reconnect.") await self._reconnect() else: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if connecting_count < max_retries: - log.info("EventSender detached. Attempting reconnect.") + log.info("EventHubProducer detached. Attempting reconnect.") await self._reconnect() else: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(error) raise error except errors.AMQPConnectionError as shutdown: if connecting_count < max_retries: - log.info("EventSender connection lost. Attempting reconnect.") + log.info("EventHubProducer connection lost. Attempting reconnect.") await self._reconnect() else: - log.info("EventSender connection lost. Shutting down.") + log.info("EventHubProducer connection lost. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) await self.close(error) raise error except compat.TimeoutException as shutdown: if connecting_count < max_retries: - log.info("EventSender timed out sending event data. Attempting reconnect.") + log.info("EventHubProducer timed out sending event data. Attempting reconnect.") await self._reconnect() else: - log.info("EventSender timed out. Shutting down.") + log.info("EventHubProducer timed out. 
Shutting down.") await self.close(shutdown) raise TimeoutError(str(shutdown), shutdown) except Exception as e: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index ae20fb00aa38..66ef6a307a69 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -23,8 +23,8 @@ from uamqp import constants from azure.eventhub import __version__ -from azure.eventhub.sender import EventSender -from azure.eventhub.receiver import EventReceiver +from azure.eventhub.sender import EventHubProducer +from azure.eventhub.receiver import EventHubConsumer from azure.eventhub.common import parse_sas_token, EventPosition from azure.eventhub.error import EventHubError from .client_abstract import EventHubClientAbstract @@ -89,7 +89,7 @@ def _create_auth(self, username=None, password=None): else: # Azure credential get_jwt_token = functools.partial(self.credential.get_token, - ['https://eventhubs.azure.net//.default']) + 'https://eventhubs.azure.net//.default') return authentication.JWTTokenAuth(self.auth_uri, self.auth_uri, get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) @@ -187,29 +187,29 @@ def get_partition_properties(self, partition): finally: mgmt_client.close() - def create_receiver( - self, partition_id, event_position, consumer_group="$Default", - exclusive_receiver_priority=None, operation=None, prefetch=None, + def create_consumer( + self, consumer_group, partition_id, event_position, + owner_level=None, operation=None, prefetch=None, ): - # type: (str, str, EventPosition, int, str, int) -> EventReceiver + # type: (str, str, EventPosition, int, str, int) -> EventHubConsumer """ Create a receiver to the client for a particular consumer group and partition. - :param partition_id: The ID of the partition. - :type partition_id: str :param consumer_group: The name of the consumer group. Default value is `$Default`. 
:type consumer_group: str + :param partition_id: The ID of the partition. + :type partition_id: str :param event_position: The position from which to start receiving. :type event_position: ~azure.eventhub.common.EventPosition - :param exclusive_receiver_priority: The priority of the exclusive receiver. The client will create an exclusive - receiver if exclusive_receiver_priority is set. - :type exclusive_receiver_priority: int + :param owner_level: The priority of the exclusive receiver. The client will create an exclusive + receiver if owner_level is set. + :type owner_level: int :param operation: An optional operation to be appended to the hostname in the source URL. The value must start with `/` character. :type operation: str :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int - :rtype: ~azure.eventhub.receiver.EventReceiver + :rtype: ~azure.eventhub.receiver.EventHubConsumer Example: .. literalinclude:: ../examples/test_examples_eventhub.py @@ -225,15 +225,15 @@ def create_receiver( path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, path, consumer_group, partition_id) - handler = EventReceiver( - self, source_url, event_position=event_position, exclusive_receiver_priority=exclusive_receiver_priority, + handler = EventHubConsumer( + self, source_url, event_position=event_position, owner_level=owner_level, prefetch=prefetch) return handler - def create_sender(self, partition_id=None, operation=None, send_timeout=None): - # type: (str, str, float) -> EventSender + def create_producer(self, partition_id=None, operation=None, send_timeout=None): + # type: (str, str, float) -> EventHubProducer """ - Create a sender to the client to send EventData object to an EventHub. + Create a EventHubProducer to send EventData object to an EventHub. :param partition_id: Optionally specify a particular partition to send to. 
If omitted, the events will be distributed to available partitions via @@ -245,7 +245,7 @@ def create_sender(self, partition_id=None, operation=None, send_timeout=None): :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. :type send_timeout: int - :rtype: ~azure.eventhub.sender.EventSender + :rtype: ~azure.eventhub.sender.EventHubProducer Example: .. literalinclude:: ../examples/test_examples_eventhub.py @@ -261,6 +261,6 @@ def create_sender(self, partition_id=None, operation=None, send_timeout=None): target = target + operation send_timeout = self.config.send_timeout if send_timeout is None else send_timeout - handler = EventSender( + handler = EventHubProducer( self, target, partition=partition_id, send_timeout=send_timeout) return handler diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 15acb45e8057..f0817e0189c9 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -315,13 +315,13 @@ def _process_redirect_uri(self, redirect): self.mgmt_target = redirect_uri @abstractmethod - def create_receiver( - self, partition_id, event_position, consumer_group="$Default", exclusive_receiver_priority=None, + def create_consumer( + self, consumer_group, partition_id, event_position, owner_level=None, operation=None, prefetch=None, ): pass @abstractmethod - def create_sender(self, partition_id=None, operation=None, send_timeout=None): + def create_producer(self, partition_id=None, operation=None, send_timeout=None): pass diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index ac122b9e1881..d7816d3aeeba 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -19,15 +19,15 @@ log = logging.getLogger(__name__) -class EventReceiver(object): +class EventHubConsumer(object): """ - Implements a EventReceiver. + Implements a EventHubConsumer. """ timeout = 0 _epoch = b'com.microsoft:epoch' - def __init__(self, client, source, event_position=None, prefetch=300, exclusive_receiver_priority=None, + def __init__(self, client, source, event_position=None, prefetch=300, owner_level=None, keep_alive=None, auto_reconnect=True): """ Instantiate a receiver. @@ -39,9 +39,9 @@ def __init__(self, client, source, event_position=None, prefetch=300, exclusive_ :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param exclusive_receiver_priority: The priority of the exclusive receiver. It will an exclusive - receiver if exclusive_receiver_priority is set. - :type exclusive_receiver_priority: int + :param owner_level: The priority of the exclusive receiver. It will an exclusive + receiver if owner_level is set. 
+ :type owner_level: int """ self.running = False self.client = client @@ -49,7 +49,7 @@ def __init__(self, client, source, event_position=None, prefetch=300, exclusive_ self.offset = event_position self.messages_iter = None self.prefetch = prefetch - self.exclusive_receiver_priority = exclusive_receiver_priority + self.owner_level = owner_level self.keep_alive = keep_alive self.auto_reconnect = auto_reconnect self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) @@ -62,8 +62,8 @@ def __init__(self, client, source, event_position=None, prefetch=300, exclusive_ source = Source(self.source) if self.offset is not None: source.set_filter(self.offset._selector()) # pylint: disable=protected-access - if exclusive_receiver_priority: - self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(exclusive_receiver_priority))} + if owner_level: + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(owner_level))} self._handler = ReceiveClient( source, auth=self.client.get_auth(), @@ -100,53 +100,53 @@ def __next__(self): return event_data except errors.AuthenticationException as auth_error: if connecting_count < max_retries: - log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + log.info("EventHubConsumer disconnected due to token error. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver authentication failed. Shutting down.") + log.info("EventHubConsumer authentication failed. Shutting down.") error = AuthenticationError(str(auth_error), auth_error) self.close(auth_error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. 
Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(error) raise error except errors.AMQPConnectionError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver connection lost. Attempting reconnect.") + log.info("EventHubConsumer connection lost. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver connection lost. Shutting down.") + log.info("EventHubConsumer connection lost. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(error) raise error except compat.TimeoutException as shutdown: if connecting_count < max_retries: - log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + log.info("EventHubConsumer timed out receiving event data. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver timed out. Shutting down.") + log.info("EventHubConsumer timed out. Shutting down.") self.close(shutdown) raise TimeoutError(str(shutdown), shutdown) except StopIteration: raise except KeyboardInterrupt: - log.info("EventReceiver stops due to keyboard interrupt") - print("EventReceiver stopped") + log.info("EventHubConsumer stops due to keyboard interrupt") + print("EventHubConsumer stopped") self.close() raise except Exception as e: @@ -168,7 +168,7 @@ def _redirect(self, redirect): def _open(self): """ - Open the EventReceiver using the supplied connection. + Open the EventHubConsumer using the supplied connection. 
If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. @@ -243,46 +243,55 @@ def _build_connection(self, is_reconnect=False): return True except errors.AuthenticationException as shutdown: if is_reconnect: - log.info("EventReceiver couldn't authenticate. Shutting down. (%r)", shutdown) + log.info("EventHubConsumer couldn't authenticate. Shutting down. (%r)", shutdown) error = AuthenticationError(str(shutdown), shutdown) self.close(exception=error) raise error else: - log.info("EventReceiver couldn't authenticate. Attempting reconnect.") + log.info("EventHubConsumer couldn't authenticate. Attempting reconnect.") return False except errors.LinkRedirect as redirect: self._redirect(redirect) return True except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") return False else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if is_reconnect: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error else: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") return False except errors.AMQPConnectionError as shutdown: if is_reconnect: - log.info("EventReceiver connection error (%r). Shutting down.", shutdown) + log.info("EventHubConsumer connection error (%r). Shutting down.", shutdown) error = AuthenticationError(str(shutdown), shutdown) self.close(exception=error) raise error else: - log.info("EventReceiver couldn't authenticate. 
Attempting reconnect.") + log.info("EventHubConsumer couldn't authenticate. Attempting reconnect.") + return False + except compat.TimeoutException as shutdown: + if is_reconnect: + log.info("EventHubConsumer authentication timed out. Shutting down.") + error = AuthenticationError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventHubConsumer authentication timed out. Attempting reconnect.") return False except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventReceiver reconnect failed: {}".format(e)) + error = EventHubError("EventHubConsumer reconnect failed: {}".format(e)) self.close(exception=error) raise error @@ -385,51 +394,51 @@ def receive(self, max_batch_size=None, timeout=None): return data_batch except errors.AuthenticationException as auth_error: if connecting_count < max_retries: - log.info("EventReceiver disconnected due to token error. Attempting reconnect.") + log.info("EventHubConsumer disconnected due to token error. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver authentication failed. Shutting down.") + log.info("EventHubConsumer authentication failed. Shutting down.") error = AuthenticationError(str(auth_error), auth_error) self.close(auth_error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry and self.auto_reconnect: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver detached. Attempting reconnect.") + log.info("EventHubConsumer detached. 
Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver detached. Shutting down.") + log.info("EventHubConsumer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(error) raise error except errors.AMQPConnectionError as shutdown: if connecting_count < max_retries: - log.info("EventReceiver connection lost. Attempting reconnect.") + log.info("EventHubConsumer connection lost. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver connection lost. Shutting down.") + log.info("EventHubConsumer connection lost. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(error) raise error except compat.TimeoutException as shutdown: if connecting_count < max_retries: - log.info("EventReceiver timed out receiving event data. Attempting reconnect.") + log.info("EventHubConsumer timed out receiving event data. Attempting reconnect.") self._reconnect() else: - log.info("EventReceiver timed out. Shutting down.") + log.info("EventHubConsumer timed out. Shutting down.") self.close(shutdown) raise TimeoutError(str(shutdown), shutdown) except KeyboardInterrupt: - log.info("EventReceiver stops due to keyboard interrupt") - print("EventReceiver stopped") + log.info("EventHubConsumer stops due to keyboard interrupt") + print("EventHubConsumer stopped") self.close() raise except Exception as e: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index 5e6754281428..736bcd7a397c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -20,15 +20,15 @@ log = logging.getLogger(__name__) -class EventSender(object): +class EventHubProducer(object): """ - Implements a EventSender. + Implements a EventHubProducer. 
""" def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=None, auto_reconnect=True): """ - Instantiate an EventHub event EventSender handler. + Instantiate an EventHub event EventHubProducer handler. :param client: The parent EventHubClient. :type client: ~azure.eventhub.client.EventHubClient. @@ -83,7 +83,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def _open(self): """ - Open the EventSender using the supplied connection. + Open the EventHubProducer using the supplied connection. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. @@ -138,43 +138,52 @@ def _build_connection(self, is_reconnect=False): return True except errors.AuthenticationException as shutdown: if is_reconnect: - log.info("EventSender couldn't authenticate. Shutting down. (%r)", shutdown) + log.info("EventHubProducer couldn't authenticate. Shutting down. (%r)", shutdown) error = AuthenticationError(str(shutdown), shutdown) self.close(exception=error) raise error else: - log.info("EventSender couldn't authenticate. Attempting reconnect.") + log.info("EventHubProducer couldn't authenticate. Attempting reconnect.") return False except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry: - log.info("EventSender detached. Attempting reconnect.") + log.info("EventHubProducer detached. Attempting reconnect.") return False else: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if is_reconnect: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectError(str(shutdown), shutdown) self.close(exception=error) raise error else: - log.info("EventSender detached. Attempting reconnect.") + log.info("EventHubProducer detached. 
Attempting reconnect.") return False except errors.AMQPConnectionError as shutdown: if is_reconnect: - log.info("EventSender connection error (%r). Shutting down.", shutdown) + log.info("EventHubProducer connection error (%r). Shutting down.", shutdown) error = AuthenticationError(str(shutdown), shutdown) self.close(exception=error) raise error else: - log.info("EventSender couldn't authenticate. Attempting reconnect.") + log.info("EventHubProducer couldn't authenticate. Attempting reconnect.") + return False + except compat.TimeoutException as shutdown: + if is_reconnect: + log.info("EventHubProducer authentication timed out. Shutting down.") + error = AuthenticationError(str(shutdown), shutdown) + self.close(exception=error) + raise error + else: + log.info("EventHubProducer authentication timed out. Attempting reconnect.") return False except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventSender Reconnect failed: {}".format(e)) + error = EventHubError("EventHubProducer failed to connect: {}".format(e)) self.close(exception=error) raise error @@ -225,7 +234,7 @@ def _send_event_data(self): self._handler.wait() self.unsent_events = self._handler.pending_messages if self._outcome != constants.MessageSendResult.Ok: - EventSender._error(self._outcome, self._condition) + EventHubProducer._error(self._outcome, self._condition) return except (errors.MessageAccepted, errors.MessageAlreadySettled, @@ -241,46 +250,46 @@ def _send_event_data(self): raise error except errors.AuthenticationException as auth_error: if connecting_count < max_retries: - log.info("EventSender disconnected due to token error. Attempting reconnect.") + log.info("EventHubProducer disconnected due to token error. Attempting reconnect.") self._reconnect() else: - log.info("EventSender authentication failed. Shutting down.") + log.info("EventHubProducer authentication failed. 
Shutting down.") error = AuthenticationError(str(auth_error), auth_error) self.close(auth_error) raise error except (errors.LinkDetach, errors.ConnectionClose) as shutdown: if shutdown.action.retry: - log.info("EventSender detached. Attempting reconnect.") + log.info("EventHubProducer detached. Attempting reconnect.") self._reconnect() else: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(exception=error) raise error except errors.MessageHandlerError as shutdown: if connecting_count < max_retries: - log.info("EventSender detached. Attempting reconnect.") + log.info("EventHubProducer detached. Attempting reconnect.") self._reconnect() else: - log.info("EventSender detached. Shutting down.") + log.info("EventHubProducer detached. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(error) raise error except errors.AMQPConnectionError as shutdown: if connecting_count < max_retries: - log.info("EventSender connection lost. Attempting reconnect.") + log.info("EventHubProducer connection lost. Attempting reconnect.") self._reconnect() else: - log.info("EventSender connection lost. Shutting down.") + log.info("EventHubProducer connection lost. Shutting down.") error = ConnectionLostError(str(shutdown), shutdown) self.close(error) raise error except compat.TimeoutException as shutdown: if connecting_count < max_retries: - log.info("EventSender timed out sending event data. Attempting reconnect.") + log.info("EventHubProducer timed out sending event data. Attempting reconnect.") self._reconnect() else: - log.info("EventSender timed out. Shutting down.") + log.info("EventHubProducer timed out. 
Shutting down.") self.close(shutdown) raise TimeoutError(str(shutdown), shutdown) except Exception as e: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py index 84d6a9ae84fe..598a93c639c0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eh_partition_pump.py @@ -72,7 +72,7 @@ async def open_clients_async(self): hostname, event_hub_path, shared_key_cred, network_tracing=self.host.eph_options.debug_trace, http_proxy=self.host.eph_options.http_proxy) - self.partition_receive_handler = self.eh_client.create_receiver( + self.partition_receive_handler = self.eh_client.create_consumer( partition_id=self.partition_context.partition_id, consumer_group=self.partition_context.consumer_group_name, event_position=EventPosition(self.partition_context.offset), diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index 9b13fa2a61f6..937dd0eed850 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -29,7 +29,7 @@ from azure.eventprocessorhost.partition_pump import PartitionPump from azure.eventprocessorhost.partition_manager import PartitionManager -from azure.eventhub import EventHubClient, EventReceiver, EventPosition +from azure.eventhub import EventHubClient, EventHubConsumer, EventPosition def get_logger(filename, level=logging.INFO): @@ -176,7 +176,7 @@ def connstr_receivers(connection_str): partitions = client.get_partition_ids() receivers = [] for p in partitions: - receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1"), prefetch=500) + receiver = client.create_consumer(consumer_group="$default", partition_id=p, event_position=EventPosition("-1"), prefetch=500) receiver._open() receivers.append(receiver) yield connection_str, receivers @@ 
-192,7 +192,7 @@ def connstr_senders(connection_str): senders = [] for p in partitions: - sender = client.create_sender(partition_id=p) + sender = client.create_producer(partition_id=p) senders.append(sender) yield connection_str, senders for s in senders: diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py index 8149c1c2a51d..7bf5227f8054 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py @@ -41,7 +41,7 @@ async def main(): raise ValueError("No EventHubs URL supplied.") client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EVENT_POSITION) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) await iter_receiver(receiver) if __name__ == '__main__': diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py index 63ad3428fb40..f6bb9de8d32f 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py @@ -31,7 +31,7 @@ async def pump(client, partition): - receiver = client.create_receiver(partition_id=partition, event_position=EVENT_POSITION, prefetch=5) + receiver = client.create_consumer(consumer_group="$default", partition_id=partition, event_position=EVENT_POSITION, prefetch=5) async with receiver: total = 0 start_time = time.time() diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py index 200a2be8ad98..76a56b8519b2 100644 --- 
a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py @@ -30,7 +30,7 @@ async def run(client): - sender = client.create_sender() + sender = client.create_producer() await send(sender, 4) diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py index 4a36faa88947..9790259c5129 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py @@ -32,20 +32,20 @@ async def test_example_eventhub_async_send_and_receive(live_eventhub_config): # [START create_eventhub_client_async_sender] client = EventHubClient.from_connection_string(connection_str) # Create an async sender. - sender = client.create_sender(partition_id="0") + sender = client.create_producer(partition_id="0") # [END create_eventhub_client_async_sender] # [START create_eventhub_client_async_receiver] client = EventHubClient.from_connection_string(connection_str) # Create an async receiver. - receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) # Create an exclusive async receiver. 
- receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), exclusive_receiver_priority=1) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'), owner_level=1) # [END create_eventhub_client_async_receiver] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_sender(partition_id="0") - receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) + sender = client.create_producer(partition_id="0") + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) await receiver.receive(timeout=1) @@ -76,7 +76,7 @@ async def test_example_eventhub_async_sender_ops(live_eventhub_config, connectio # [START eventhub_client_async_sender_close] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_sender(partition_id="0") + sender = client.create_producer(partition_id="0") try: await sender.send(EventData(b"A single event")) finally: @@ -92,7 +92,7 @@ async def test_example_eventhub_async_receiver_ops(live_eventhub_config, connect # [START eventhub_client_async_receiver_close] client = EventHubClient.from_connection_string(connection_str) - receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) try: # Open and receive await receiver.receive(timeout=1) diff --git a/sdk/eventhub/azure-eventhubs/examples/batch_send.py b/sdk/eventhub/azure-eventhubs/examples/batch_send.py index bf80907a655e..e9a2e31a0092 100644 --- a/sdk/eventhub/azure-eventhubs/examples/batch_send.py +++ b/sdk/eventhub/azure-eventhubs/examples/batch_send.py @@ -33,7 +33,7 @@ raise ValueError("No EventHubs URL supplied.") client = 
EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") event_list = [] for i in range(1500): diff --git a/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py index 39697379f4d2..1a0840e02cfe 100644 --- a/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py +++ b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py @@ -36,7 +36,7 @@ event_hub_path=EVENT_HUB, credential=credential) try: - sender = client.create_sender(partition_id='0') + sender = client.create_producer(partition_id='0') with sender: event = EventData(body='A single message') diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index 4ce8771425c6..b70b03284f65 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -19,7 +19,7 @@ iot_connection_str = 'HostName=iothubfortrack2py.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=glF9a2n0D9fgmWpfTqjjmvkYt0WaTNqZx9GV/UKwDkQ=' # os.environ['IOTHUB_CONNECTION_STR'] client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) -receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') +receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') with receiver: received = receiver.receive(timeout=5) print(received) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py index 255deb6be516..152afd81355e 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py +++ 
b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py @@ -21,7 +21,7 @@ client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) try: - sender = client.create_sender(operation='/messages/devicebound') + sender = client.create_producer(operation='/messages/devicebound') with sender: sender.send(EventData(b"A single event", to_device=iot_device_id)) diff --git a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py index 4a22149139b5..22bd4b4e14ea 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py +++ b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py @@ -37,7 +37,7 @@ def run(self): client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) -receiver = client.create_receiver(partition_id="0", event_position=EVENT_POSITION) +receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) with receiver: thread = PartitionReceiverThread(receiver) thread.start() diff --git a/sdk/eventhub/azure-eventhubs/examples/proxy.py b/sdk/eventhub/azure-eventhubs/examples/proxy.py index 0417fe32d0f4..9eb74d12c418 100644 --- a/sdk/eventhub/azure-eventhubs/examples/proxy.py +++ b/sdk/eventhub/azure-eventhubs/examples/proxy.py @@ -39,8 +39,8 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False, http_proxy=HTTP_PROXY) try: - sender = client.create_sender(partition_id=PARTITION) - receiver = client.create_receiver(partition_id=PARTITION, event_position=EVENT_POSITION) + sender = client.create_producer(partition_id=PARTITION) + receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION) receiver.receive(timeout=1) diff --git a/sdk/eventhub/azure-eventhubs/examples/recv.py 
b/sdk/eventhub/azure-eventhubs/examples/recv.py index a2ca9a32e6ee..e8609d91db97 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv.py @@ -33,7 +33,7 @@ network_tracing=False) try: - receiver = client.create_receiver(partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=5000) + receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=5000) with receiver: start_time = time.time() batch = receiver.receive(timeout=5000) diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py index da451343d564..2b36f089274f 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py @@ -34,7 +34,7 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) try: - receiver = client.create_receiver(partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=100) + receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=100) with receiver: batched_events = receiver.receive(max_batch_size=10) for event_data in batched_events: diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py index c0cc9b341174..878b4d7de263 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py @@ -30,8 +30,8 @@ PARTITION = "0" -async def pump(client, exclusive_receiver_priority): - receiver = client.create_receiver(partition_id=PARTITION, event_position=EventPosition("-1"), exclusive_receiver_priority=exclusive_receiver_priority) +async def pump(client, owner_level): + receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, 
event_position=EventPosition("-1"), owner_level=owner_level) async with receiver: total = 0 start_time = time.time() diff --git a/sdk/eventhub/azure-eventhubs/examples/send.py b/sdk/eventhub/azure-eventhubs/examples/send.py index 316a6e2739a5..7f319bcfb1dc 100644 --- a/sdk/eventhub/azure-eventhubs/examples/send.py +++ b/sdk/eventhub/azure-eventhubs/examples/send.py @@ -33,7 +33,7 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) - sender = client.create_sender(partition_id="0") + sender = client.create_producer(partition_id="0") ed = EventData("msg") diff --git a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py index 2c70c735d4d3..d46f5a0b63e8 100644 --- a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py +++ b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py @@ -60,20 +60,20 @@ def test_example_eventhub_sync_send_and_receive(live_eventhub_config): # [START create_eventhub_client_sender] client = EventHubClient.from_connection_string(connection_str) # Create a sender. - sender = client.create_sender(partition_id="0") + sender = client.create_producer(partition_id="0") # [END create_eventhub_client_sender] # [START create_eventhub_client_receiver] client = EventHubClient.from_connection_string(connection_str) # Create a receiver. - receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) # Create an exclusive receiver object. 
- exclusive_receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=1) + exclusive_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), owner_level=1) # [END create_eventhub_client_receiver] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_sender(partition_id="0") - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + sender = client.create_producer(partition_id="0") + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) try: receiver.receive(timeout=1) @@ -112,7 +112,7 @@ def test_example_eventhub_sender_ops(live_eventhub_config, connection_str): # [START eventhub_client_sender_close] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_sender(partition_id="0") + sender = client.create_producer(partition_id="0") try: sender.send(EventData(b"A single event")) finally: @@ -127,7 +127,7 @@ def test_example_eventhub_receiver_ops(live_eventhub_config, connection_str): # [START eventhub_client_receiver_close] client = EventHubClient.from_connection_string(connection_str) - receiver = client.create_receiver(partition_id="0", consumer_group="$default", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) try: receiver.receive(timeout=1) finally: diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py index 5d135ef54506..7759d9643394 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py @@ -26,8 +26,8 @@ async def test_client_secret_credential_async(aad_credential, live_eventhub): 
event_hub_path=live_eventhub['event_hub'], credential=credential, user_agent='customized information') - sender = client.create_sender(partition_id='0') - receiver = client.create_receiver(partition_id='0', event_position=EventPosition.latest()) + sender = client.create_producer(partition_id='0') + receiver = client.create_consumer(consumer_group="$default", partition_id='0', event_position=EventPosition.latest()) async with receiver: diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index ceb3b05e5033..d443edafe2cb 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -25,7 +25,7 @@ async def pump(receiver, sleep=None): async def get_partitions(iot_connection_str): client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=1000, operation='/messages/events') + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), prefetch=1000, operation='/messages/events') async with receiver: partitions = await client.get_properties() return partitions["partition_ids"] @@ -39,7 +39,7 @@ async def test_iothub_receive_multiple_async(iot_connection_str): client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) receivers = [] for p in partitions: - receivers.append(client.create_receiver(partition_id=p, event_position=EventPosition("-1"), prefetch=10, operation='/messages/events')) + receivers.append(client.create_consumer(consumer_group="$default", partition_id=p, event_position=EventPosition("-1"), prefetch=10, operation='/messages/events')) outputs = await asyncio.gather(*[pump(r) for r in receivers]) assert 
isinstance(outputs[0], int) and outputs[0] <= 10 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py index 800bad9b0f74..41a7db1b444d 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph.py @@ -173,7 +173,7 @@ async def test_long_running_eph(live_eventhub): send_client = EventHubClient.from_connection_string(conn_str) pumps = [] for pid in ["0", "1"]: - sender = send_client.create_sender(partition_id=pid, send_timeout=0) + sender = send_client.create_producer(partition_id=pid, send_timeout=0) pumps.append(pump(pid, sender, 15)) results = await asyncio.gather(*pumps, return_exceptions=True) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py index f7cdb5c1de3e..957896c816f1 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_eph_with_context.py @@ -178,7 +178,7 @@ async def test_long_running_context_eph(live_eventhub): send_client = EventHubClient.from_connection_string(conn_str) pumps = [] for pid in ["0", "1"]: - sender = send_client.create_sender(partition_id=pid, send_timeout=0) + sender = send_client.create_producer(partition_id=pid, send_timeout=0) pumps.append(pump(pid, sender, 15)) results = await asyncio.gather(*pumps, return_exceptions=True) assert not any(results) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py index 63c7940a539e..ced71b804889 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py +++ 
b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py @@ -124,7 +124,7 @@ async def test_long_running_receive_async(connection_str): partitions = args.partitions.split(",") pumps = [] for pid in partitions: - receiver = client.create_receiver( + receiver = client.create_consumer(consumer_group="$default", partition_id=pid, event_position=EventPosition(args.offset), prefetch=50, diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py index 30b55af5f6ef..809fa3430b59 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_send_async.py @@ -113,12 +113,12 @@ async def test_long_running_partition_send_async(connection_str): partitions = args.partitions.split(",") pumps = [] for pid in partitions: - sender = client.create_sender(partition_id=pid, send_timeout=0) + sender = client.create_producer(partition_id=pid, send_timeout=0) pumps.append(pump(pid, sender, args, args.duration)) results = await asyncio.gather(*pumps, return_exceptions=True) assert not results except Exception as e: - logger.error("EventSender failed: {}".format(e)) + logger.error("EventHubProducer failed: {}".format(e)) if __name__ == '__main__': diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py index d55d71585b59..27deb42c3aeb 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_negative_async.py @@ -27,7 +27,7 @@ async def test_send_with_invalid_hostname_async(invalid_hostname, connstr_receivers): _, receivers = connstr_receivers client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) - sender = client.create_sender() + sender = 
client.create_producer() with pytest.raises(AuthenticationError): await sender._open() @@ -36,7 +36,7 @@ async def test_send_with_invalid_hostname_async(invalid_hostname, connstr_receiv @pytest.mark.asyncio async def test_receive_with_invalid_hostname_async(invalid_hostname): client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) - sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) + sender = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -46,7 +46,7 @@ async def test_receive_with_invalid_hostname_async(invalid_hostname): async def test_send_with_invalid_key_async(invalid_key, connstr_receivers): _, receivers = connstr_receivers client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with pytest.raises(AuthenticationError): await sender._open() @@ -55,7 +55,7 @@ async def test_send_with_invalid_key_async(invalid_key, connstr_receivers): @pytest.mark.asyncio async def test_receive_with_invalid_key_async(invalid_key): client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) - sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) + sender = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -65,7 +65,7 @@ async def test_receive_with_invalid_key_async(invalid_key): async def test_send_with_invalid_policy_async(invalid_policy, connstr_receivers): _, receivers = connstr_receivers client = EventHubClient.from_connection_string(invalid_policy, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with pytest.raises(AuthenticationError): await sender._open() @@ -74,7 +74,7 @@ async 
def test_send_with_invalid_policy_async(invalid_policy, connstr_receivers) @pytest.mark.asyncio async def test_receive_with_invalid_policy_async(invalid_policy): client = EventHubClient.from_connection_string(invalid_policy, network_tracing=False) - sender = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) + sender = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await sender._open() @@ -82,12 +82,11 @@ async def test_receive_with_invalid_policy_async(invalid_policy): @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_partition_key_with_partition_async(connection_str): - pytest.skip("Skipped tentatively. Confirm whether to throw ValueError or just warn users") + pytest.skip("No longer raise value error. EventData will be sent to partition_id") client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") try: data = EventData(b"Data") - data.partition_key = b"PKey" with pytest.raises(ValueError): await sender.send(data) finally: @@ -98,7 +97,7 @@ async def test_send_partition_key_with_partition_async(connection_str): @pytest.mark.asyncio async def test_non_existing_entity_sender_async(connection_str): client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") with pytest.raises(AuthenticationError): await sender._open() @@ -107,7 +106,7 @@ async def test_non_existing_entity_sender_async(connection_str): @pytest.mark.asyncio async def test_non_existing_entity_receiver_async(connection_str): client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) - receiver = client.create_receiver(partition_id="0", 
event_position=EventPosition("-1")) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): await receiver._open() @@ -118,7 +117,7 @@ async def test_receive_from_invalid_partitions_async(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1")) + receiver = client.create_consumer(consumer_group="$default", partition_id=p, event_position=EventPosition("-1")) try: with pytest.raises(ConnectError): await receiver.receive(timeout=10) @@ -132,7 +131,7 @@ async def test_send_to_invalid_partitions_async(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id=p) + sender = client.create_producer(partition_id=p) try: with pytest.raises(ConnectError): await sender._open() @@ -146,7 +145,7 @@ async def test_send_too_large_message_async(connection_str): if sys.platform.startswith('darwin'): pytest.skip("Skipping on OSX - open issue regarding message size") client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() try: data = EventData(b"A" * 1100000) with pytest.raises(EventDataSendError): @@ -159,7 +158,7 @@ async def test_send_too_large_message_async(connection_str): @pytest.mark.asyncio async def test_send_null_body_async(connection_str): client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() try: with pytest.raises(ValueError): data = EventData(None) @@ -187,7 +186,7 @@ async def test_max_receivers_async(connstr_senders): client = 
EventHubClient.from_connection_string(connection_str, network_tracing=False) receivers = [] for i in range(6): - receivers.append(client.create_receiver(partition_id="0", prefetch=1000, event_position=EventPosition('@latest'))) + receivers.append(client.create_consumer(consumer_group="$default", partition_id="0", prefetch=1000, event_position=EventPosition('@latest'))) outputs = await asyncio.gather( pump(receivers[0]), diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 55822c9f89d1..1132d4854b2e 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -18,7 +18,7 @@ async def test_receive_end_of_stream_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -34,7 +34,7 @@ async def test_receive_end_of_stream_async(connstr_senders): async def test_receive_with_offset_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -44,7 +44,7 @@ async def test_receive_with_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - 
offset_receiver = client.create_receiver(partition_id="0", event_position=offset) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=offset) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -58,7 +58,7 @@ async def test_receive_with_offset_async(connstr_senders): async def test_receive_with_inclusive_offset_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -68,7 +68,7 @@ async def test_receive_with_inclusive_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -79,7 +79,7 @@ async def test_receive_with_inclusive_offset_async(connstr_senders): async def test_receive_with_datetime_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -88,7 +88,7 
@@ async def test_receive_with_datetime_async(connstr_senders): assert len(received) == 1 offset = received[0].enqueued_time - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -103,7 +103,7 @@ async def test_receive_with_datetime_async(connstr_senders): async def test_receive_with_sequence_no_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -112,7 +112,7 @@ async def test_receive_with_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -127,7 +127,7 @@ async def test_receive_with_sequence_no_async(connstr_senders): async def test_receive_with_inclusive_sequence_no_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", 
event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -136,7 +136,7 @@ async def test_receive_with_inclusive_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset, inclusive=True)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -147,7 +147,7 @@ async def test_receive_with_inclusive_sequence_no_async(connstr_senders): async def test_receive_batch_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'), prefetch=500) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -179,8 +179,8 @@ async def test_exclusive_receiver_async(connstr_senders): senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver1 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=10, prefetch=5) - receiver2 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=20, prefetch=10) + receiver1 = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), owner_level=10, prefetch=5) + receiver2 = client.create_consumer(consumer_group="$default", partition_id="0", 
event_position=EventPosition("-1"), owner_level=20, prefetch=10) try: await pump(receiver1) output2 = await pump(receiver2) @@ -203,7 +203,7 @@ async def test_multiple_receiver_async(connstr_senders): assert partitions["partition_ids"] == ["0", "1"] receivers = [] for i in range(2): - receivers.append(client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=10)) + receivers.append(client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), prefetch=10)) try: more_partitions = await client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] @@ -224,8 +224,8 @@ async def test_exclusive_receiver_after_non_exclusive_receiver_async(connstr_sen senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver1 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=10) - receiver2 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=15, prefetch=10) + receiver1 = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), prefetch=10) + receiver2 = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), owner_level=15, prefetch=10) try: await pump(receiver1) output2 = await pump(receiver2) @@ -244,8 +244,8 @@ async def test_non_exclusive_receiver_after_exclusive_receiver_async(connstr_sen senders[0].send(EventData(b"Receiving only a single event")) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver1 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), exclusive_receiver_priority=15, prefetch=10) - receiver2 = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), prefetch=10) + receiver1 = 
client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), owner_level=15, prefetch=10) + receiver2 = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), prefetch=10) try: output1 = await pump(receiver1) with pytest.raises(ConnectError): @@ -275,7 +275,7 @@ def batched(): yield ed client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'), prefetch=500) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 @@ -298,7 +298,7 @@ def batched(): async def test_receive_over_websocket_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'), prefetch=500) event_list = [] for i in range(20): diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py index b087bb783bb1..366f1fa80880 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receiver_iterator_async.py @@ -18,7 +18,7 @@ async def test_receive_iterator_async(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", 
event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) async with receiver: received = await receiver.receive(timeout=5) assert len(received) == 0 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index f7a39c6503aa..866d5996dc68 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -15,14 +15,14 @@ EventHubError) from azure.eventhub.aio import EventHubClient -SLEEP = False +SLEEP = True @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_with_long_interval_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() try: await sender.send(EventData(b"A single event")) for _ in range(1): @@ -59,7 +59,7 @@ def pump(receiver): async def test_send_with_forced_conn_close_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() try: await sender.send(EventData(b"A single event")) if SLEEP: diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py index 54adec39ef34..f969fc533e36 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py @@ -20,7 +20,7 @@ async def test_send_with_partition_key_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, 
network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() async with sender: data_val = 0 @@ -48,7 +48,7 @@ async def test_send_with_partition_key_async(connstr_receivers): async def test_send_and_receive_zero_length_body_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() async with sender: await sender.send(EventData("")) @@ -65,7 +65,7 @@ async def test_send_and_receive_zero_length_body_async(connstr_receivers): async def test_send_single_event_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() async with sender: await sender.send(EventData(b"A single event")) @@ -87,7 +87,7 @@ def batched(): yield EventData("Event number {}".format(i)) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() async with sender: await sender.send(batched()) @@ -106,7 +106,7 @@ def batched(): async def test_send_partition_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") async with sender: await sender.send(EventData(b"Data")) @@ -121,7 +121,7 @@ async def test_send_partition_async(connstr_receivers): async def test_send_non_ascii_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="0") + sender = client.create_producer(partition_id="0") async with 
sender: await sender.send(EventData("é,è,à,ù,â,ê,î,ô,û")) await sender.send(EventData(json.dumps({"foo": "漢字"}))) @@ -142,7 +142,7 @@ def batched(): yield EventData("Event number {}".format(i)) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") async with sender: await sender.send(batched()) @@ -157,7 +157,7 @@ def batched(): async def test_send_array_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() async with sender: await sender.send(EventData([b"A", b"B", b"C"])) @@ -174,8 +174,8 @@ async def test_send_array_async(connstr_receivers): async def test_send_multiple_clients_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender_0 = client.create_sender(partition_id="0") - sender_1 = client.create_sender(partition_id="1") + sender_0 = client.create_producer(partition_id="0") + sender_1 = client.create_producer(partition_id="1") async with sender_0: await sender_0.send(EventData(b"Message 0")) async with sender_1: @@ -206,7 +206,7 @@ def batched(): yield ed client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() async with sender: await sender.send(batched()) @@ -228,7 +228,7 @@ def batched(): async def test_send_over_websocket_async(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() event_list = [] for i in range(20): diff --git 
a/sdk/eventhub/azure-eventhubs/tests/test_auth.py b/sdk/eventhub/azure-eventhubs/tests/test_auth.py index c4f28f1fe0ac..5a1a73875483 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_auth.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_auth.py @@ -22,8 +22,8 @@ def test_client_secret_credential(aad_credential, live_eventhub): event_hub_path=live_eventhub['event_hub'], credential=credential, user_agent='customized information') - sender = client.create_sender(partition_id='0') - receiver = client.create_receiver(partition_id='0', event_position=EventPosition.latest()) + sender = client.create_producer(partition_id='0') + receiver = client.create_consumer(consumer_group="$default", partition_id='0', event_position=EventPosition.latest()) with receiver: received = receiver.receive(timeout=1) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index 372ada0bf50d..ce060aecbb6d 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -15,7 +15,7 @@ def test_iothub_receive_sync(iot_connection_str, device_id): pytest.skip("current code will cause ErrorCodes.LinkRedirect") client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') receiver._open() try: partitions = client.get_properties() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py index bc2f92d14cdf..c94f1f81c6c8 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py @@ -17,7 +17,7 @@ @pytest.mark.liveTest def 
test_iothub_send_single_event(iot_connection_str, device_id): client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) - sender = client.create_sender(operation='/messages/devicebound') + sender = client.create_producer(operation='/messages/devicebound') try: sender.send(EventData(b"A single event", to_device=device_id)) finally: diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py index b7477bc3e39f..ed612292846d 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py @@ -77,7 +77,7 @@ def pump(receivers, duration): batch[-1].offset.value)) print("Total received {}".format(total)) except Exception as e: - print("EventReceiver failed: {}".format(e)) + print("EventHubConsumer failed: {}".format(e)) raise @@ -119,7 +119,7 @@ def test_long_running_receive(connection_str): partitions = args.partitions.split(",") pumps = {} for pid in partitions: - pumps[pid] = client.create_receiver( + pumps[pid] = client.create_consumer(consumer_group="$default", partition_id=pid, event_position=EventPosition(args.offset), prefetch=50) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py index e0f535faf8bb..e4826d05e3fa 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_send.py @@ -17,7 +17,7 @@ import pytest from logging.handlers import RotatingFileHandler -from azure.eventhub import EventHubClient, EventSender, EventData, EventHubSharedKeyCredential +from azure.eventhub import EventHubClient, EventHubProducer, EventData, EventHubSharedKeyCredential def get_logger(filename, level=logging.INFO): @@ -51,7 +51,7 @@ def check_send_successful(outcome, condition): def main(client, args): - sender = 
client.create_sender() + sender = client.create_producer() deadline = time.time() + args.duration total = 0 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_negative.py b/sdk/eventhub/azure-eventhubs/tests/test_negative.py index 87f23d029aaf..1bf9855c80eb 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_negative.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_negative.py @@ -24,7 +24,7 @@ def test_send_with_invalid_hostname(invalid_hostname, connstr_receivers): _, receivers = connstr_receivers client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with pytest.raises(AuthenticationError): sender._open() @@ -32,7 +32,7 @@ def test_send_with_invalid_hostname(invalid_hostname, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_hostname_sync(invalid_hostname): client = EventHubClient.from_connection_string(invalid_hostname, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -41,7 +41,7 @@ def test_receive_with_invalid_hostname_sync(invalid_hostname): def test_send_with_invalid_key(invalid_key, connstr_receivers): _, receivers = connstr_receivers client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with pytest.raises(AuthenticationError): sender._open() @@ -49,7 +49,7 @@ def test_send_with_invalid_key(invalid_key, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_key_sync(invalid_key): client = EventHubClient.from_connection_string(invalid_key, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) + receiver = 
client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -58,7 +58,7 @@ def test_receive_with_invalid_key_sync(invalid_key): def test_send_with_invalid_policy(invalid_policy, connstr_receivers): _, receivers = connstr_receivers client = EventHubClient.from_connection_string(invalid_policy, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with pytest.raises(AuthenticationError): sender._open() @@ -66,7 +66,7 @@ def test_send_with_invalid_policy(invalid_policy, connstr_receivers): @pytest.mark.liveTest def test_receive_with_invalid_policy_sync(invalid_policy): client = EventHubClient.from_connection_string(invalid_policy, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -75,7 +75,7 @@ def test_receive_with_invalid_policy_sync(invalid_policy): def test_send_partition_key_with_partition_sync(connection_str): pytest.skip("Skipped tentatively. 
Confirm whether to throw ValueError or just warn users") client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") try: data = EventData(b"Data") data.partition_key = b"PKey" @@ -88,7 +88,7 @@ def test_send_partition_key_with_partition_sync(connection_str): @pytest.mark.liveTest def test_non_existing_entity_sender(connection_str): client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") with pytest.raises(AuthenticationError): sender._open() @@ -96,7 +96,7 @@ def test_non_existing_entity_sender(connection_str): @pytest.mark.liveTest def test_non_existing_entity_receiver(connection_str): client = EventHubClient.from_connection_string(connection_str, event_hub_path="nemo", network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition("-1")) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1")) with pytest.raises(AuthenticationError): receiver._open() @@ -106,7 +106,7 @@ def test_receive_from_invalid_partitions_sync(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id=p, event_position=EventPosition("-1")) + receiver = client.create_consumer(consumer_group="$default", partition_id=p, event_position=EventPosition("-1")) try: with pytest.raises(ConnectError): receiver.receive(timeout=10) @@ -119,7 +119,7 @@ def test_send_to_invalid_partitions(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = 
client.create_sender(partition_id=p) + sender = client.create_producer(partition_id=p) try: with pytest.raises(ConnectError): sender._open() @@ -132,7 +132,7 @@ def test_send_too_large_message(connection_str): if sys.platform.startswith('darwin'): pytest.skip("Skipping on OSX - open issue regarding message size") client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() try: data = EventData(b"A" * 1100000) with pytest.raises(EventDataSendError): @@ -145,7 +145,7 @@ def test_send_too_large_message(connection_str): def test_send_null_body(connection_str): partitions = ["XYZ", "-1", "1000", "-"] client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() try: with pytest.raises(ValueError): data = EventData(None) @@ -158,7 +158,7 @@ def test_send_null_body(connection_str): def test_message_body_types(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) try: received = receiver.receive(timeout=5) assert len(received) == 0 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index 10cf1575cbb8..fd1b93d75287 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py @@ -15,7 +15,7 @@ # def test_receive_without_events(connstr_senders): # connection_str, senders = connstr_senders # client = EventHubClient.from_connection_string(connection_str, network_tracing=False) -# receiver = client.create_receiver("$default", "0", 
event_position=EventPosition('@latest')) +# receiver = client.create_consumer(consumer_group="$default", partition_id"$default", "0", event_position=EventPosition('@latest')) # finish = datetime.datetime.now() + datetime.timedelta(seconds=240) # count = 0 # try: @@ -37,7 +37,7 @@ def test_receive_end_of_stream(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -55,7 +55,7 @@ def test_receive_with_offset_sync(connstr_senders): client = EventHubClient.from_connection_string(connection_str, network_tracing=False) partitions = client.get_properties() assert partitions["partition_ids"] == ["0", "1"] - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) with receiver: more_partitions = client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] @@ -70,7 +70,7 @@ def test_receive_with_offset_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_receiver(partition_id="0", event_position=offset) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=offset) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -83,7 +83,7 @@ def test_receive_with_offset_sync(connstr_senders): def test_receive_with_inclusive_offset(connstr_senders): connection_str, senders = connstr_senders client = 
EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) @@ -97,7 +97,7 @@ def test_receive_with_inclusive_offset(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -109,7 +109,7 @@ def test_receive_with_datetime_sync(connstr_senders): client = EventHubClient.from_connection_string(connection_str, network_tracing=False) partitions = client.get_properties() assert partitions["partition_ids"] == ["0", "1"] - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) with receiver: more_partitions = client.get_properties() assert more_partitions["partition_ids"] == ["0", "1"] @@ -123,7 +123,7 @@ def test_receive_with_datetime_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -145,7 +145,7 @@ def 
test_receive_with_custom_datetime_sync(connstr_senders): for i in range(5): senders[0].send(EventData(b"Message after timestamp")) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset)) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) with receiver: all_received = [] received = receiver.receive(timeout=1) @@ -163,7 +163,7 @@ def test_receive_with_custom_datetime_sync(connstr_senders): def test_receive_with_sequence_no(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) @@ -174,7 +174,7 @@ def test_receive_with_sequence_no(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset, False)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, False)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -187,7 +187,7 @@ def test_receive_with_sequence_no(connstr_senders): def test_receive_with_inclusive_sequence_no(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -195,7 +195,7 @@ def 
test_receive_with_inclusive_sequence_no(connstr_senders): received = receiver.receive(timeout=5) assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_receiver(partition_id="0", event_position=EventPosition(offset, inclusive=True)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 1 @@ -205,7 +205,7 @@ def test_receive_with_inclusive_sequence_no(connstr_senders): def test_receive_batch(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'), prefetch=500) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -234,7 +234,7 @@ def batched(): yield ed client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'), prefetch=500) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 @@ -256,7 +256,7 @@ def batched(): def test_receive_over_websocket_sync(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest'), prefetch=500) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", 
event_position=EventPosition('@latest'), prefetch=500) event_list = [] for i in range(20): diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py b/sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py index 6f0dd3456df6..803d27953341 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receiver_iterator.py @@ -16,7 +16,7 @@ def test_receive_iterator(connstr_senders): connection_str, senders = connstr_senders client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - receiver = client.create_receiver(partition_id="0", event_position=EventPosition('@latest')) + receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) with receiver: received = receiver.receive(timeout=5) assert len(received) == 0 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index ef81d72782f5..376137534a5a 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -14,13 +14,13 @@ EventHubError, EventHubClient) -SLEEP = False +SLEEP = True @pytest.mark.liveTest def test_send_with_long_interval_sync(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: sender.send(EventData(b"A single event")) for _ in range(1): @@ -44,7 +44,7 @@ def test_send_with_long_interval_sync(connstr_receivers): def test_send_with_forced_conn_close_sync(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: sender.send(EventData(b"A single event")) 
sender._handler._connection._conn.destroy() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index efec47dcf140..222c85247f9a 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -17,7 +17,7 @@ def test_send_with_partition_key(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: data_val = 0 for partition in [b"a", b"b", b"c", b"d", b"e", b"f"]: @@ -45,7 +45,7 @@ def test_send_and_receive_large_body_size(connstr_receivers): pytest.skip("Skipping on OSX - open issue regarding message size") connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: payload = 250 * 1024 sender.send(EventData("A" * payload)) @@ -62,7 +62,7 @@ def test_send_and_receive_large_body_size(connstr_receivers): def test_send_and_receive_zero_length_body(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: sender.send(EventData("")) @@ -78,7 +78,7 @@ def test_send_and_receive_zero_length_body(connstr_receivers): def test_send_single_event(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: sender.send(EventData(b"A single event")) @@ -99,7 +99,7 @@ def batched(): yield EventData("Event number {}".format(i)) client = EventHubClient.from_connection_string(connection_str, 
network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: sender.send(batched()) @@ -117,7 +117,7 @@ def batched(): def test_send_partition(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") with sender: sender.send(EventData(b"Data")) @@ -131,7 +131,7 @@ def test_send_partition(connstr_receivers): def test_send_non_ascii(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="0") + sender = client.create_producer(partition_id="0") with sender: sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û")) sender.send(EventData(json.dumps({"foo": u"漢字"}))) @@ -151,7 +151,7 @@ def batched(): yield EventData("Event number {}".format(i)) client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender(partition_id="1") + sender = client.create_producer(partition_id="1") with sender: sender.send(batched()) time.sleep(1) @@ -166,7 +166,7 @@ def batched(): def test_send_array_sync(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: sender.send(EventData([b"A", b"B", b"C"])) @@ -182,8 +182,8 @@ def test_send_array_sync(connstr_receivers): def test_send_multiple_clients(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender_0 = client.create_sender(partition_id="0") - sender_1 = client.create_sender(partition_id="1") + sender_0 = client.create_producer(partition_id="0") 
+ sender_1 = client.create_producer(partition_id="1") with sender_0: sender_0.send(EventData(b"Message 0")) with sender_1: @@ -213,7 +213,7 @@ def batched(): yield ed client = EventHubClient.from_connection_string(connection_str, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() with sender: sender.send(batched()) @@ -234,7 +234,7 @@ def batched(): def test_send_over_websocket_sync(connstr_receivers): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, transport_type=TransportType.AmqpOverWebsocket, network_tracing=False) - sender = client.create_sender() + sender = client.create_producer() event_list = [] for i in range(20): From a73bf7b9acadc2454be807f00a17641c140db90c Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 19 Jun 2019 11:40:24 -0700 Subject: [PATCH 16/54] Avoid nested with statement --- .../tests/asynctests/test_receive_async.py | 65 +++++++++---------- .../azure-eventhubs/tests/test_receive.py | 63 +++++++++--------- 2 files changed, 63 insertions(+), 65 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 1132d4854b2e..3d0cf38ca743 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -44,13 +44,13 @@ async def test_receive_with_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=offset) - async with offset_receiver: - received = await offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after offset")) - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", 
partition_id="0", event_position=offset) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after offset")) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -68,10 +68,10 @@ async def test_receive_with_inclusive_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) - async with offset_receiver: - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -88,14 +88,14 @@ async def test_receive_with_datetime_async(connstr_senders): assert len(received) == 1 offset = received[0].enqueued_time - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) - async with offset_receiver: - received = await offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after timestamp")) - time.sleep(1) - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after timestamp")) + time.sleep(1) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -112,14 +112,14 @@ async def 
test_receive_with_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) - async with offset_receiver: - received = await offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message next in sequence")) - time.sleep(1) - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message next in sequence")) + time.sleep(1) + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -136,10 +136,10 @@ async def test_receive_with_inclusive_sequence_no_async(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) - async with offset_receiver: - received = await offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) + async with offset_receiver: + received = await offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -308,8 +308,7 @@ async def test_receive_over_websocket_async(connstr_senders): received = await receiver.receive(timeout=5) assert len(received) == 0 - with senders[0]: - senders[0].send(event_list) + senders[0].send(event_list) time.sleep(1) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index 
fd1b93d75287..5b79c8c7ff06 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py @@ -70,13 +70,13 @@ def test_receive_with_offset_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=offset) - with offset_receiver: - received = offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after offset")) - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=offset) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after offset")) + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -97,10 +97,10 @@ def test_receive_with_inclusive_offset(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) - with offset_receiver: - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -123,13 +123,13 @@ def test_receive_with_datetime_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) - with 
offset_receiver: - received = offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message after timestamp")) - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message after timestamp")) + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -174,14 +174,14 @@ def test_receive_with_sequence_no(connstr_senders): assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, False)) - with offset_receiver: - received = offset_receiver.receive(timeout=5) - assert len(received) == 0 - senders[0].send(EventData(b"Message next in sequence")) - time.sleep(1) - received = offset_receiver.receive(timeout=5) - assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, False)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 0 + senders[0].send(EventData(b"Message next in sequence")) + time.sleep(1) + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest def test_receive_with_inclusive_sequence_no(connstr_senders): @@ -195,10 +195,10 @@ def test_receive_with_inclusive_sequence_no(connstr_senders): received = receiver.receive(timeout=5) assert len(received) == 1 offset = received[0].sequence_number - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) - with offset_receiver: - received = offset_receiver.receive(timeout=5) - 
assert len(received) == 1 + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) + with offset_receiver: + received = offset_receiver.receive(timeout=5) + assert len(received) == 1 @pytest.mark.liveTest @@ -266,8 +266,7 @@ def test_receive_over_websocket_sync(connstr_senders): received = receiver.receive(timeout=5) assert len(received) == 0 - with senders[0] as sender: - sender.send(event_list) + senders[0].send(event_list) time.sleep(1) From e47ce295486c85bd3809658c71ba4467c1a08483 Mon Sep 17 00:00:00 2001 From: yijxie Date: Wed, 19 Jun 2019 14:35:39 -0700 Subject: [PATCH 17/54] Skip forced reconnect test --- .../azure-eventhubs/tests/asynctests/test_reconnect_async.py | 1 + sdk/eventhub/azure-eventhubs/tests/test_reconnect.py | 1 + 2 files changed, 2 insertions(+) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index 866d5996dc68..7ce3afc047ba 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -57,6 +57,7 @@ def pump(receiver): @pytest.mark.liveTest @pytest.mark.asyncio async def test_send_with_forced_conn_close_async(connstr_receivers): + pytest.skip("This test is similar to the above one") connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_producer() diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index 376137534a5a..67df0cb0d376 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -42,6 +42,7 @@ def test_send_with_long_interval_sync(connstr_receivers): @pytest.mark.liveTest def 
test_send_with_forced_conn_close_sync(connstr_receivers): + pytest.skip("This test is similar to the above one") connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_producer() From ef3ac561c207a1459e6936d5cd18860b07ee16ae Mon Sep 17 00:00:00 2001 From: Yunhao Ling <47871814+yunhaoling@users.noreply.github.com> Date: Wed, 19 Jun 2019 23:00:04 -0700 Subject: [PATCH 18/54] Update naming in eventhub (consumer and producer). (#5984) --- .../eventhub/aio/event_hubs_client_async.py | 20 ++++----- .../azure/eventhub/aio/receiver_async.py | 8 ++-- .../azure/eventhub/aio/sender_async.py | 8 ++-- .../azure-eventhubs/azure/eventhub/client.py | 12 ++--- .../azure/eventhub/client_abstract.py | 6 +-- .../azure-eventhubs/azure/eventhub/common.py | 2 +- .../azure/eventhub/receiver.py | 8 ++-- .../azure-eventhubs/azure/eventhub/sender.py | 8 ++-- .../async_examples/iterator_receiver_async.py | 14 +++--- .../examples/async_examples/recv_async.py | 8 ++-- .../examples/async_examples/send_async.py | 10 ++--- .../test_examples_eventhub_async.py | 38 ++++++++-------- .../azure-eventhubs/examples/batch_send.py | 6 +-- .../examples/client_secret_auth.py | 6 +-- .../azure-eventhubs/examples/iothub_recv.py | 6 +-- .../azure-eventhubs/examples/iothub_send.py | 6 +-- .../examples/iterator_receiver.py | 14 +++--- .../azure-eventhubs/examples/proxy.py | 14 +++--- sdk/eventhub/azure-eventhubs/examples/recv.py | 8 ++-- .../azure-eventhubs/examples/recv_batch.py | 6 +-- .../azure-eventhubs/examples/recv_epoch.py | 9 ++-- sdk/eventhub/azure-eventhubs/examples/send.py | 6 +-- .../examples/test_examples_eventhub.py | 44 +++++++++---------- 23 files changed, 133 insertions(+), 134 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py index a14ce27029b2..d4117cad1ebd 100644 --- 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py @@ -98,7 +98,7 @@ async def get_properties(self): """ alt_creds = { "username": self._auth_config.get("iot_username"), - "password":self._auth_config.get("iot_password")} + "password": self._auth_config.get("iot_password")} try: mgmt_auth = self._create_auth(**alt_creds) mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug) @@ -114,12 +114,12 @@ async def get_properties(self): output = {} if eh_info: output['path'] = eh_info[b'name'].decode('utf-8') - output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at'])/1000) + output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at']) / 1000) output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] return output finally: await mgmt_client.close_async() - + async def get_partition_ids(self): """ Get partition ids of the specified EventHub async. @@ -179,7 +179,7 @@ def create_consumer( self, consumer_group, partition_id, event_position, owner_level=None, operation=None, prefetch=None, loop=None): """ - Create an async receiver to the client for a particular consumer group and partition. + Create an async consumer to the client for a particular consumer group and partition. :param consumer_group: The name of the consumer group. Default value is `$Default`. :type consumer_group: str @@ -187,13 +187,13 @@ def create_consumer( :type partition_id: str :param event_position: The position from which to start receiving. :type event_position: ~azure.eventhub.common.EventPosition - :param owner_level: The priority of the exclusive receiver. The client will create an exclusive - receiver if owner_level is set. + :param owner_level: The priority of the exclusive consumer. The client will create an exclusive + consumer if owner_level is set. 
:type owner_level: int :param operation: An optional operation to be appended to the hostname in the source URL. The value must start with `/` character. :type operation: str - :param prefetch: The message prefetch count of the receiver. Default is 300. + :param prefetch: The message prefetch count of the consumer. Default is 300. :type prefetch: int :param loop: An event loop. If not specified the default event loop will be used. :rtype: ~azure.eventhub.aio.receiver_async.EventHubConsumer @@ -204,7 +204,7 @@ def create_consumer( :end-before: [END create_eventhub_client_async_receiver] :language: python :dedent: 4 - :caption: Add an async receiver to the client for a particular consumer group and partition. + :caption: Add an async consumer to the client for a particular consumer group and partition. """ prefetch = self.config.prefetch if prefetch is None else prefetch @@ -220,7 +220,7 @@ def create_consumer( def create_producer( self, partition_id=None, operation=None, send_timeout=None, loop=None): """ - Create an async sender to the client to send ~azure.eventhub.common.EventData object + Create an async producer to the client to send ~azure.eventhub.common.EventData object to an EventHub. :param partition_id: Optionally specify a particular partition to send to. @@ -243,7 +243,7 @@ def create_producer( :end-before: [END create_eventhub_client_async_sender] :language: python :dedent: 4 - :caption: Add an async sender to the client to + :caption: Add an async producer to the client to send ~azure.eventhub.common.EventData object to an EventHub. 
""" diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py index 00818195a479..4fa465b75924 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -28,7 +28,7 @@ def __init__( # pylint: disable=super-init-not-called self, client, source, event_position=None, prefetch=300, owner_level=None, keep_alive=None, auto_reconnect=True, loop=None): """ - Instantiate an async receiver. + Instantiate an async consumer. :param client: The parent EventHubClientAsync. :type client: ~azure.eventhub.aio.EventHubClientAsync @@ -39,8 +39,8 @@ def __init__( # pylint: disable=super-init-not-called :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param owner_level: The priority of the exclusive receiver. It will an exclusive - receiver if owner_level is set. + :param owner_level: The priority of the exclusive consumer. It will an exclusive + consumer if owner_level is set. :type owner_level: int :param loop: An event loop. """ @@ -155,7 +155,7 @@ async def __anext__(self): def _check_closed(self): if self.error: - raise EventHubError("This receiver has been closed. Please create a new receiver to receive event data.", + raise EventHubError("This consumer has been closed. 
Please create a new consumer to receive event data.", self.error) async def _open(self): """ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py index c526d17faf4a..23347bee9069 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py @@ -29,7 +29,7 @@ def __init__( # pylint: disable=super-init-not-called self, client, target, partition=None, send_timeout=60, keep_alive=None, auto_reconnect=True, loop=None): """ - Instantiate an EventHub event SenderAsync handler. + Instantiate an async EventHubProducer. :param client: The parent EventHubClientAsync. :type client: ~azure.eventhub.aio.EventHubClientAsync @@ -44,7 +44,7 @@ def __init__( # pylint: disable=super-init-not-called :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during periods of inactivity. The default value is `None`, i.e. no keep alive pings. :type keep_alive: float - :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. + :param auto_reconnect: Whether to automatically reconnect the producer if a retryable error occurs. Default value is `True`. :type auto_reconnect: bool :param loop: An event loop. If not specified the default event loop will be used. @@ -59,7 +59,7 @@ def __init__( # pylint: disable=super-init-not-called self.timeout = send_timeout self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) self.reconnect_backoff = 1 - self.name = "EHSender-{}".format(uuid.uuid4()) + self.name = "EHProducer-{}".format(uuid.uuid4()) self.unsent_events = None self.redirected = None self.error = None @@ -305,7 +305,7 @@ async def _send_event_data(self): def _check_closed(self): if self.error: - raise EventHubError("This sender has been closed. 
Please create a new sender to send event data.", + raise EventHubError("This producer has been closed. Please create a new producer to send event data.", self.error) @staticmethod diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 66ef6a307a69..ab77bb0e189d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -193,7 +193,7 @@ def create_consumer( ): # type: (str, str, EventPosition, int, str, int) -> EventHubConsumer """ - Create a receiver to the client for a particular consumer group and partition. + Create a consumer to the client for a particular consumer group and partition. :param consumer_group: The name of the consumer group. Default value is `$Default`. :type consumer_group: str @@ -201,13 +201,13 @@ def create_consumer( :type partition_id: str :param event_position: The position from which to start receiving. :type event_position: ~azure.eventhub.common.EventPosition - :param owner_level: The priority of the exclusive receiver. The client will create an exclusive - receiver if owner_level is set. + :param owner_level: The priority of the exclusive consumer. The client will create an exclusive + consumer if owner_level is set. :type owner_level: int :param operation: An optional operation to be appended to the hostname in the source URL. The value must start with `/` character. :type operation: str - :param prefetch: The message prefetch count of the receiver. Default is 300. + :param prefetch: The message prefetch count of the consumer. Default is 300. :type prefetch: int :rtype: ~azure.eventhub.receiver.EventHubConsumer @@ -217,7 +217,7 @@ def create_consumer( :end-before: [END create_eventhub_client_receiver] :language: python :dedent: 4 - :caption: Add a receiver to the client for a particular consumer group and partition. 
+ :caption: Add a consumer to the client for a particular consumer group and partition. """ prefetch = self.config.prefetch if prefetch is None else prefetch @@ -253,7 +253,7 @@ def create_producer(self, partition_id=None, operation=None, send_timeout=None): :end-before: [END create_eventhub_client_sender] :language: python :dedent: 4 - :caption: Add a sender to the client to send EventData object to an EventHub. + :caption: Add a producer to the client to send EventData object to an EventHub. """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index f0817e0189c9..1dba09ac09ae 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -117,7 +117,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the transport type is explicitly requested. :type transport_type: ~azure.eventhub.TransportType - :param prefetch: The message prefetch count of the receiver. Default is 300. + :param prefetch: The message prefetch count of the consumer. Default is 300. :type prefetch: int :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but will return as soon as service returns no new events. Default value is the same as prefetch. @@ -181,7 +181,7 @@ def from_connection_string(cls, conn_str, event_hub_path=None, **kwargs): ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the transport type is explicitly requested. :type transport_type: ~azure.eventhub.TransportType - :param prefetch: The message prefetch count of the receiver. Default is 300. + :param prefetch: The message prefetch count of the consumer. Default is 300. 
:type prefetch: int :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but will return as soon as service returns no new events. Default value is the same as prefetch. @@ -237,7 +237,7 @@ def from_iothub_connection_string(cls, conn_str, **kwargs): ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the transport type is explicitly requested. :type transport_type: ~azure.eventhub.TransportType - :param prefetch: The message prefetch count of the receiver. Default is 300. + :param prefetch: The message prefetch count of the consumer. Default is 300. :type prefetch: int :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but will return as soon as service returns no new events. Default value is the same as prefetch. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index df526d83da93..2f1cebefc3c7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -273,7 +273,7 @@ def _set_partition_key(self, value): class EventPosition(object): """ - The position(offset, sequence or timestamp) where a receiver starts. Examples: + The position(offset, sequence or timestamp) where a consumer starts. Examples: Beginning of the event stream: >>> event_pos = EventPosition("-1") diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index d7816d3aeeba..5fb8eaccc80c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -30,7 +30,7 @@ class EventHubConsumer(object): def __init__(self, client, source, event_position=None, prefetch=300, owner_level=None, keep_alive=None, auto_reconnect=True): """ - Instantiate a receiver. + Instantiate a consumer. 
:param client: The parent EventHubClient. :type client: ~azure.eventhub.client.EventHubClient @@ -39,8 +39,8 @@ def __init__(self, client, source, event_position=None, prefetch=300, owner_leve :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int - :param owner_level: The priority of the exclusive receiver. It will an exclusive - receiver if owner_level is set. + :param owner_level: The priority of the exclusive consumer. It will an exclusive + consumer if owner_level is set. :type owner_level: int """ self.running = False @@ -157,7 +157,7 @@ def __next__(self): def _check_closed(self): if self.error: - raise EventHubError("This receiver has been closed. Please create a new receiver to receive event data.", + raise EventHubError("This consumer has been closed. Please create a new consumer to receive event data.", self.error) def _redirect(self, redirect): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index 736bcd7a397c..2380ffb7932c 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -28,7 +28,7 @@ class EventHubProducer(object): def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=None, auto_reconnect=True): """ - Instantiate an EventHub event EventHubProducer handler. + Instantiate an EventHubProducer. :param client: The parent EventHubClient. :type client: ~azure.eventhub.client.EventHubClient. @@ -43,7 +43,7 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during periods of inactivity. The default value is None, i.e. no keep alive pings. :type keep_alive: float - :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. 
+ :param auto_reconnect: Whether to automatically reconnect the producer if a retryable error occurs. Default value is `True`. :type auto_reconnect: bool """ @@ -58,7 +58,7 @@ def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=N self.auto_reconnect = auto_reconnect self.retry_policy = errors.ErrorPolicy(max_retries=self.client.config.max_retries, on_error=_error_handler) self.reconnect_backoff = 1 - self.name = "EHSender-{}".format(uuid.uuid4()) + self.name = "EHProducer-{}".format(uuid.uuid4()) self.unsent_events = None if partition: self.target += "/Partitions/" + partition @@ -300,7 +300,7 @@ def _send_event_data(self): def _check_closed(self): if self.error: - raise EventHubError("This sender has been closed. Please create a new sender to send event data.", self.error) + raise EventHubError("This producer has been closed. Please create a new producer to send event data.", self.error) @staticmethod def _set_partition_key(event_datas, partition_key): diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py index 7bf5227f8054..7e66ad71e28e 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/iterator_receiver_async.py @@ -6,7 +6,7 @@ # -------------------------------------------------------------------------------------------- """ -An example to show iterator receiver. +An example to show iterator consumer. 
""" import os @@ -30,10 +30,10 @@ EVENT_POSITION = EventPosition.first_available_event() -async def iter_receiver(receiver): - async with receiver: - async for item in receiver: - print(item.body_as_str(), item.offset.value, receiver.name) +async def iter_consumer(consumer): + async with consumer: + async for item in consumer: + print(item.body_as_str(), item.offset.value, consumer.name) async def main(): @@ -41,8 +41,8 @@ async def main(): raise ValueError("No EventHubs URL supplied.") client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) - receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) - await iter_receiver(receiver) + consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) + await iter_consumer(consumer) if __name__ == '__main__': asyncio.run(main()) diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py index f6bb9de8d32f..cc0b0f317c70 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/recv_async.py @@ -6,7 +6,7 @@ # -------------------------------------------------------------------------------------------- """ -An example to show running concurrent receivers. +An example to show running concurrent consumers. 
""" import os @@ -31,11 +31,11 @@ async def pump(client, partition): - receiver = client.create_consumer(consumer_group="$default", partition_id=partition, event_position=EVENT_POSITION, prefetch=5) - async with receiver: + consumer = client.create_consumer(consumer_group="$default", partition_id=partition, event_position=EVENT_POSITION, prefetch=5) + async with consumer: total = 0 start_time = time.time() - for event_data in await receiver.receive(timeout=10): + for event_data in await consumer.receive(timeout=10): last_offset = event_data.offset last_sn = event_data.sequence_number print("Received: {}, {}".format(last_offset.value, last_sn)) diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py index 76a56b8519b2..ffae6787628d 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/send_async.py @@ -30,16 +30,16 @@ async def run(client): - sender = client.create_producer() - await send(sender, 4) + producer = client.create_producer() + await send(producer, 4) -async def send(sender, count): - async with sender: +async def send(producer, count): + async with producer: for i in range(count): logger.info("Sending message: {}".format(i)) data = EventData(str(i)) - await sender.send(data) + await producer.send(data) try: if not HOSTNAME: diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py index 9790259c5129..c22e602781df 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py @@ -31,36 +31,36 @@ async def test_example_eventhub_async_send_and_receive(live_eventhub_config): # [START create_eventhub_client_async_sender] client = 
EventHubClient.from_connection_string(connection_str) - # Create an async sender. - sender = client.create_producer(partition_id="0") + # Create an async producer. + producer = client.create_producer(partition_id="0") # [END create_eventhub_client_async_sender] # [START create_eventhub_client_async_receiver] client = EventHubClient.from_connection_string(connection_str) - # Create an async receiver. + # Create an async consumer. receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) - # Create an exclusive async receiver. + # Create an exclusive async consumer. receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest'), owner_level=1) # [END create_eventhub_client_async_receiver] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_producer(partition_id="0") - receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) + producer = client.create_producer(partition_id="0") + consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) - await receiver.receive(timeout=1) + await consumer.receive(timeout=1) # [START eventhub_client_async_send] - async with sender: + async with producer: event_data = EventData(b"A single event") - await sender.send(event_data) + await producer.send(event_data) # [END eventhub_client_async_send] await asyncio.sleep(1) # [START eventhub_client_async_receive] logger = logging.getLogger("azure.eventhub") - async with receiver: - received = await receiver.receive(timeout=5) + async with consumer: + received = await consumer.receive(timeout=5) for event_data in received: logger.info("Message received:{}".format(event_data.body_as_str())) # [END eventhub_client_async_receive] @@ -70,35 +70,35 @@ async def 
test_example_eventhub_async_send_and_receive(live_eventhub_config): @pytest.mark.asyncio -async def test_example_eventhub_async_sender_ops(live_eventhub_config, connection_str): +async def test_example_eventhub_async_producer_ops(live_eventhub_config, connection_str): from azure.eventhub.aio import EventHubClient from azure.eventhub import EventData # [START eventhub_client_async_sender_close] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_producer(partition_id="0") + producer = client.create_producer(partition_id="0") try: - await sender.send(EventData(b"A single event")) + await producer.send(EventData(b"A single event")) finally: # Close down the send handler. - await sender.close() + await producer.close() # [END eventhub_client_async_sender_close] @pytest.mark.asyncio -async def test_example_eventhub_async_receiver_ops(live_eventhub_config, connection_str): +async def test_example_eventhub_async_consumer_ops(live_eventhub_config, connection_str): from azure.eventhub.aio import EventHubClient from azure.eventhub import EventPosition # [START eventhub_client_async_receiver_close] client = EventHubClient.from_connection_string(connection_str) - receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) + consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) try: # Open and receive - await receiver.receive(timeout=1) + await consumer.receive(timeout=1) except: raise finally: # Close down the receive handler. 
- await receiver.close() + await consumer.close() # [END eventhub_client_async_receiver_close] diff --git a/sdk/eventhub/azure-eventhubs/examples/batch_send.py b/sdk/eventhub/azure-eventhubs/examples/batch_send.py index e9a2e31a0092..a539775c062a 100644 --- a/sdk/eventhub/azure-eventhubs/examples/batch_send.py +++ b/sdk/eventhub/azure-eventhubs/examples/batch_send.py @@ -33,16 +33,16 @@ raise ValueError("No EventHubs URL supplied.") client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) - sender = client.create_producer(partition_id="1") + producer = client.create_producer(partition_id="1") event_list = [] for i in range(1500): event_list.append('Hello World') - with sender: + with producer: start_time = time.time() data = EventData(body=event_list) - sender.send(data) + producer.send(data) end_time = time.time() run_time = end_time - start_time logger.info("Runtime: {} seconds".format(run_time)) diff --git a/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py index 1a0840e02cfe..54d439281f2b 100644 --- a/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py +++ b/sdk/eventhub/azure-eventhubs/examples/client_secret_auth.py @@ -36,11 +36,11 @@ event_hub_path=EVENT_HUB, credential=credential) try: - sender = client.create_producer(partition_id='0') + producer = client.create_producer(partition_id='0') - with sender: + with producer: event = EventData(body='A single message') - sender.send(event) + producer.send(event) except KeyboardInterrupt: pass diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index b70b03284f65..d0c11c970dd8 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -19,9 +19,9 @@ iot_connection_str = 
'HostName=iothubfortrack2py.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=glF9a2n0D9fgmWpfTqjjmvkYt0WaTNqZx9GV/UKwDkQ=' # os.environ['IOTHUB_CONNECTION_STR'] client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) -receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') -with receiver: - received = receiver.receive(timeout=5) +consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') +with consumer: + received = consumer.receive(timeout=5) print(received) eh_info = client.get_properties() diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py index 152afd81355e..cc664a73ad45 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py @@ -21,9 +21,9 @@ client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) try: - sender = client.create_producer(operation='/messages/devicebound') - with sender: - sender.send(EventData(b"A single event", to_device=iot_device_id)) + producer = client.create_producer(operation='/messages/devicebound') + with producer: + producer.send(EventData(b"A single event", to_device=iot_device_id)) except KeyboardInterrupt: pass diff --git a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py index 22bd4b4e14ea..fe20fc8503dd 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py +++ b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py @@ -25,20 +25,20 @@ EVENT_POSITION = EventPosition.first_available_event() -class PartitionReceiverThread(Thread): - def __init__(self, receiver): +class PartitionConsumerThread(Thread): + def __init__(self, consumer): 
Thread.__init__(self) - self.receiver = receiver + self.consumer = consumer def run(self): - for item in self.receiver: + for item in self.consumer: print(item) client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) -receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) -with receiver: - thread = PartitionReceiverThread(receiver) +consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EVENT_POSITION) +with consumer: + thread = PartitionConsumerThread(consumer) thread.start() thread.join(2) # stop after 2 seconds diff --git a/sdk/eventhub/azure-eventhubs/examples/proxy.py b/sdk/eventhub/azure-eventhubs/examples/proxy.py index 9eb74d12c418..e8d7e3fff9ad 100644 --- a/sdk/eventhub/azure-eventhubs/examples/proxy.py +++ b/sdk/eventhub/azure-eventhubs/examples/proxy.py @@ -39,10 +39,10 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False, http_proxy=HTTP_PROXY) try: - sender = client.create_producer(partition_id=PARTITION) - receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION) + producer = client.create_producer(partition_id=PARTITION) + consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION) - receiver.receive(timeout=1) + consumer.receive(timeout=1) event_list = [] for i in range(20): @@ -50,12 +50,12 @@ print('Start sending events behind a proxy.') - sender.send(event_list) + producer.send(event_list) print('Start receiving events behind a proxy.') - received = receiver.receive(max_batch_size=50, timeout=5) + received = consumer.receive(max_batch_size=50, timeout=5) finally: - sender.close() - receiver.close() + producer.close() + consumer.close() diff --git 
a/sdk/eventhub/azure-eventhubs/examples/recv.py b/sdk/eventhub/azure-eventhubs/examples/recv.py index e8609d91db97..7f78a8a5ad51 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv.py @@ -33,10 +33,10 @@ network_tracing=False) try: - receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=5000) - with receiver: + consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=5000) + with consumer: start_time = time.time() - batch = receiver.receive(timeout=5000) + batch = consumer.receive(timeout=5000) while batch: for event_data in batch: last_offset = event_data.offset @@ -44,7 +44,7 @@ print("Received: {}, {}".format(last_offset.value, last_sn)) print(event_data.body_as_str()) total += 1 - batch = receiver.receive(timeout=5000) + batch = consumer.receive(timeout=5000) end_time = time.time() run_time = end_time - start_time diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py index 2b36f089274f..0b37769d0242 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py @@ -34,9 +34,9 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) try: - receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=100) - with receiver: - batched_events = receiver.receive(max_batch_size=10) + consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EVENT_POSITION, prefetch=100) + with consumer: + batched_events = consumer.receive(max_batch_size=10) for event_data in batched_events: last_offset = event_data.offset.value last_sn = event_data.sequence_number diff --git 
a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py index 878b4d7de263..4217874771ad 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_epoch.py @@ -6,7 +6,7 @@ # -------------------------------------------------------------------------------------------- """ -An example to show receiving events from an Event Hub partition as an epoch receiver. +An example to show receiving events from an Event Hub partition as an epoch consumer. """ import os @@ -26,16 +26,15 @@ USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -EXCLUSIVE_RECEIVER_PRIORITY = 42 PARTITION = "0" async def pump(client, owner_level): - receiver = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EventPosition("-1"), owner_level=owner_level) - async with receiver: + consumer = client.create_consumer(consumer_group="$default", partition_id=PARTITION, event_position=EventPosition("-1"), owner_level=owner_level) + async with consumer: total = 0 start_time = time.time() - for event_data in await receiver.receive(timeout=5): + for event_data in await consumer.receive(timeout=5): last_offset = event_data.offset last_sn = event_data.sequence_number total += 1 diff --git a/sdk/eventhub/azure-eventhubs/examples/send.py b/sdk/eventhub/azure-eventhubs/examples/send.py index 7f319bcfb1dc..4954b1146b23 100644 --- a/sdk/eventhub/azure-eventhubs/examples/send.py +++ b/sdk/eventhub/azure-eventhubs/examples/send.py @@ -33,16 +33,16 @@ client = EventHubClient(host=HOSTNAME, event_hub_path=EVENT_HUB, credential=EventHubSharedKeyCredential(USER, KEY), network_tracing=False) - sender = client.create_producer(partition_id="0") + producer = client.create_producer(partition_id="0") ed = EventData("msg") try: start_time = time.time() - with sender: + with producer: for i in range(100): logger.info("Sending message: {}".format(i)) - 
sender.send(ed) + producer.send(ed) except: raise finally: diff --git a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py index d46f5a0b63e8..6b1552047fab 100644 --- a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py +++ b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py @@ -57,25 +57,25 @@ def test_example_eventhub_sync_send_and_receive(live_eventhub_config): from azure.eventhub import EventData, EventPosition - # [START create_eventhub_client_sender] + # [START create_eventhub_client_producer] client = EventHubClient.from_connection_string(connection_str) - # Create a sender. - sender = client.create_producer(partition_id="0") + # Create a producer. + producer = client.create_producer(partition_id="0") # [END create_eventhub_client_sender] # [START create_eventhub_client_receiver] client = EventHubClient.from_connection_string(connection_str) - # Create a receiver. - receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) - # Create an exclusive receiver object. - exclusive_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), owner_level=1) + # Create a consumer. + consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) + # Create an exclusive consumer object. 
+ exclusive_consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), owner_level=1) # [END create_eventhub_client_receiver] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_producer(partition_id="0") - receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) + producer = client.create_producer(partition_id="0") + consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) try: - receiver.receive(timeout=1) + consumer.receive(timeout=1) # [START create_event_data] event_data = EventData("String data") @@ -87,16 +87,16 @@ def test_example_eventhub_sync_send_and_receive(live_eventhub_config): # [END create_event_data] # [START eventhub_client_sync_send] - with sender: + with producer: event_data = EventData(b"A single event") - sender.send(event_data) + producer.send(event_data) # [END eventhub_client_sync_send] time.sleep(1) # [START eventhub_client_sync_receive] - with receiver: + with consumer: logger = logging.getLogger("azure.eventhub") - received = receiver.receive(timeout=5, max_batch_size=1) + received = consumer.receive(timeout=5, max_batch_size=1) for event_data in received: logger.info("Message received:{}".format(event_data.body_as_str())) # [END eventhub_client_sync_receive] @@ -107,30 +107,30 @@ def test_example_eventhub_sync_send_and_receive(live_eventhub_config): pass -def test_example_eventhub_sender_ops(live_eventhub_config, connection_str): +def test_example_eventhub_producer_ops(live_eventhub_config, connection_str): from azure.eventhub import EventHubClient, EventData # [START eventhub_client_sender_close] client = EventHubClient.from_connection_string(connection_str) - sender = client.create_producer(partition_id="0") + producer = client.create_producer(partition_id="0") try: - sender.send(EventData(b"A single event")) + 
producer.send(EventData(b"A single event")) finally: # Close down the send handler. - sender.close() + producer.close() # [END eventhub_client_sender_close] -def test_example_eventhub_receiver_ops(live_eventhub_config, connection_str): +def test_example_eventhub_consumer_ops(live_eventhub_config, connection_str): from azure.eventhub import EventHubClient from azure.eventhub import EventPosition # [START eventhub_client_receiver_close] client = EventHubClient.from_connection_string(connection_str) - receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) + consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition('@latest')) try: - receiver.receive(timeout=1) + consumer.receive(timeout=1) finally: # Close down the receive handler. - receiver.close() + consumer.close() # [END eventhub_client_receiver_close] From 86af9834ee19e421e7634579749d6b0f7daa89cf Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 20 Jun 2019 10:05:03 -0700 Subject: [PATCH 19/54] Remove azure-core requirement tentatively --- sdk/eventhub/azure-eventhubs/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 40c11892fd64..f3cc586d2bd2 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -79,7 +79,7 @@ 'msrestazure>=0.4.32,<2.0.0', 'azure-common~=1.1', 'azure-storage-blob~=1.3', - 'azure-core>=0.0.1', + # 'azure-core>=0.0.1', # will add back here and remove from dev_requirements.txt after azure core is released ], extras_require={ ":python_version<'3.0'": ['azure-nspkg'], From 981fc99dbe353afc92d95b1c288da409d7a9cf61 Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 20 Jun 2019 10:16:39 -0700 Subject: [PATCH 20/54] Warn when eventhubs or storage teardown fails --- sdk/eventhub/azure-eventhubs/conftest.py | 13 ++++++++++--- 1 file changed, 10 
insertions(+), 3 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index 937dd0eed850..422b9dd20521 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -9,6 +9,7 @@ import logging import sys import uuid +import warnings from logging.handlers import RotatingFileHandler # Ignore async tests for Python < 3.5 @@ -109,8 +110,11 @@ def live_eventhub(live_eventhub_config): # pylint: disable=redefined-outer-name live_eventhub_config['event_hub'] = hub_name yield live_eventhub_config finally: - cleanup_eventhub(live_eventhub_config, hub_name, client=client) - print("Deleted EventHub {}".format(hub_name)) + try: + cleanup_eventhub(live_eventhub_config, hub_name, client=client) + print("Deleted EventHub {}".format(hub_name)) + except: + warnings.warn(UserWarning("eventhub teardown failed")) @pytest.fixture() @@ -214,7 +218,10 @@ def storage_clm(eph): storage_clm.storage_client.create_container(container) yield storage_clm finally: - storage_clm.storage_client.delete_container(container) + try: + storage_clm.storage_client.delete_container(container) + except: + warnings.warn(UserWarning("storage container teardown failed")) @pytest.fixture() From 3074c792420d131f7765938736e8ac5914057bb1 Mon Sep 17 00:00:00 2001 From: yijxie Date: Thu, 20 Jun 2019 15:18:52 -0700 Subject: [PATCH 21/54] add type hints comments --- .../azure/eventhub/aio/event_hubs_client_async.py | 12 ++++++++---- .../azure/eventhub/aio/receiver_async.py | 6 +++++- .../azure/eventhub/aio/sender_async.py | 2 ++ .../azure-eventhubs/azure/eventhub/client.py | 11 +++-------- .../azure/eventhub/client_abstract.py | 13 +++++++++++-- .../azure-eventhubs/azure/eventhub/receiver.py | 6 +++++- .../azure-eventhubs/azure/eventhub/sender.py | 3 +++ .../tests/asynctests/test_reconnect_async.py | 2 +- .../azure-eventhubs/tests/test_reconnect.py | 2 +- 9 files changed, 39 insertions(+), 18 deletions(-) diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py index d4117cad1ebd..5e7d38be2fe4 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py @@ -4,10 +4,11 @@ # -------------------------------------------------------------------------------------------- import logging -import asyncio -import time import datetime import functools +import asyncio +from typing import Any, List, Dict, Union + from uamqp import authentication, constants, types, errors from uamqp import ( @@ -16,8 +17,6 @@ ) from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential -from azure.eventhub import ( - EventHubError) from ..client_abstract import EventHubClientAbstract from .sender_async import EventHubProducer @@ -86,6 +85,7 @@ def _create_auth(self, username=None, password=None): transport_type=transport_type) async def get_properties(self): + # type:() -> Dict[str, Any] """ Get properties of the specified EventHub async. Keys in the details dictionary include: @@ -121,6 +121,7 @@ async def get_properties(self): await mgmt_client.close_async() async def get_partition_ids(self): + # type:() -> List[str] """ Get partition ids of the specified EventHub async. @@ -129,6 +130,7 @@ async def get_partition_ids(self): return (await self.get_properties())['partition_ids'] async def get_partition_properties(self, partition): + # type:(str) -> Dict[str, str] """ Get properties of the specified partition async. 
Keys in the details dictionary include: @@ -178,6 +180,7 @@ async def get_partition_properties(self, partition): def create_consumer( self, consumer_group, partition_id, event_position, owner_level=None, operation=None, prefetch=None, loop=None): + # type: (str, str, EventPosition, int, str, int, asyncio.AbstractEventLoop) -> EventHubConsumer """ Create an async consumer to the client for a particular consumer group and partition. @@ -219,6 +222,7 @@ def create_consumer( def create_producer( self, partition_id=None, operation=None, send_timeout=None, loop=None): + # type: (str, str, float, asyncio.AbstractEventLoop) -> EventHubProducer """ Create an async producer to the client to send ~azure.eventhub.common.EventData object to an EventHub. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py index 4fa465b75924..d4f7aa419495 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -6,11 +6,12 @@ import asyncio import uuid import logging +from typing import List from uamqp import errors, types, compat from uamqp import ReceiveClientAsync, Source -from azure.eventhub import EventHubError, EventData +from azure.eventhub import EventData from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, ConnectionLostError, _error_handler log = logging.getLogger(__name__) @@ -282,6 +283,7 @@ async def _reconnect(self): return await self._build_connection(is_reconnect=True) async def close(self, exception=None): + # type: (Exception) -> None """ Close down the handler. If the handler has already closed, this will be a no op. An optional exception can be passed in to @@ -317,6 +319,7 @@ async def close(self, exception=None): @property def queue_size(self): + # type: () -> int """ The current size of the unprocessed Event queue. 
@@ -328,6 +331,7 @@ def queue_size(self): return 0 async def receive(self, max_batch_size=None, timeout=None): + # type: (int, float) -> List[EventData] """ Receive events asynchronously from the EventHub. diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py index 23347bee9069..ec297d41ee3f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py @@ -194,6 +194,7 @@ async def _reconnect(self): return await self._build_connection(is_reconnect=True) async def close(self, exception=None): + # type: (Exception) -> None """ Close down the handler. If the handler has already closed, this will be a no op. An optional exception can be passed in to @@ -316,6 +317,7 @@ def _set_partition_key(event_datas, partition_key): yield ed async def send(self, event_data, partition_key=None): + # type:(List[EventData], Union[str, bytes]) -> None """ Sends an event data and blocks until acknowledgement is received or operation times out. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index ab77bb0e189d..f3fc234ca13d 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -6,27 +6,22 @@ import logging import datetime -import sys -import uuid -import time import functools try: from urlparse import urlparse from urllib import unquote_plus, urlencode, quote_plus except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus -from typing import Any, List, Dict +from typing import Any, List, Dict, Union import uamqp from uamqp import Message, AMQPClient from uamqp import authentication from uamqp import constants -from azure.eventhub import __version__ from azure.eventhub.sender import EventHubProducer from azure.eventhub.receiver import EventHubConsumer from azure.eventhub.common import parse_sas_token, EventPosition -from azure.eventhub.error import EventHubError from .client_abstract import EventHubClientAbstract from .common import EventHubSASTokenCredential, EventHubSharedKeyCredential @@ -111,7 +106,7 @@ def get_properties(self): "password": self._auth_config.get("iot_password")} try: mgmt_auth = self._create_auth(**alt_creds) - mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug) + mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.config.network_tracing) mgmt_client.open() mgmt_msg = Message(application_properties={'name': self.eh_name}) response = mgmt_client.mgmt_request( @@ -140,7 +135,7 @@ def get_partition_ids(self): return self.get_properties()['partition_ids'] def get_partition_properties(self, partition): - # type:(str) -> Dict[str, str] + # type:(str) -> Dict[str, Any] """ Get properties of the specified partition. 
Keys in the details dictionary include: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 1dba09ac09ae..5febafb5cd16 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -16,11 +16,19 @@ except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus +try: + from typing import TYPE_CHECKING +except ImportError: + TYPE_CHECKING = False +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + from typing import Union, List, Dict + from azure.eventhub import __version__ from azure.eventhub.configuration import Configuration from azure.eventhub import constants -from .common import EventHubSharedKeyCredential, _Address +from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address log = logging.getLogger(__name__) @@ -87,6 +95,7 @@ class EventHubClientAbstract(object): """ def __init__(self, host, event_hub_path, credential, **kwargs): + # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) -> None """ Constructs a new EventHubClient. @@ -100,7 +109,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): :param credential: The credential object used for authentication which implements particular interface of getting tokens. It accepts ~azure.eventhub.EventHubSharedKeyCredential, ~azure.eventhub.EventHubSASTokenCredential, credential objects generated by the azure-identity library and - objects that implement get token interface. + objects that implement get_token(self, *scopes) method. :param http_proxy: HTTP proxy settings. This must be a dictionary with the following keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). Additionally the following keys may also be present: 'username', 'password'. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index 5fb8eaccc80c..9ec21f27e946 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -7,6 +7,7 @@ import uuid import logging import time +from typing import List from uamqp import types, errors from uamqp import compat @@ -299,6 +300,7 @@ def _reconnect(self): return self._build_connection(is_reconnect=True) def close(self, exception=None): + # type:(Exception) -> None """ Close down the handler. If the handler has already closed, this will be a no op. An optional exception can be passed in to @@ -335,6 +337,7 @@ def close(self, exception=None): @property def queue_size(self): + # type:() -> int """ The current size of the unprocessed Event queue. @@ -346,6 +349,7 @@ def queue_size(self): return 0 def receive(self, max_batch_size=None, timeout=None): + # type:(int, float) -> List[EventData] """ Receive events from the EventHub. 
@@ -377,7 +381,7 @@ def receive(self, max_batch_size=None, timeout=None): max_batch_size = min(self.client.config.max_batch_size, self.prefetch) if max_batch_size is None else max_batch_size timeout = self.client.config.receive_timeout if timeout is None else timeout - data_batch = [] + data_batch = [] # type: List[EventData] max_retries = self.client.config.max_retries connecting_count = 0 while True: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index 2380ffb7932c..74be7112c627 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -7,6 +7,7 @@ import uuid import logging import time +from typing import Iterator, Generator, List, Union from uamqp import constants, errors from uamqp import compat @@ -191,6 +192,7 @@ def _reconnect(self): return self._build_connection(is_reconnect=True) def close(self, exception=None): + # type:(Exception) -> None """ Close down the handler. If the handler has already closed, this will be a no op. An optional exception can be passed in to @@ -310,6 +312,7 @@ def _set_partition_key(event_datas, partition_key): yield ed def send(self, event_data, partition_key=None): + # type:(Union[EventData, Union[List[EventData], Iterator[EventData], Generator[EventData]]], Union[str, bytes]) -> None """ Sends an event data and blocks until acknowledgement is received or operation times out. 
diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index 7ce3afc047ba..b85135366712 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -15,7 +15,7 @@ EventHubError) from azure.eventhub.aio import EventHubClient -SLEEP = True +SLEEP = False @pytest.mark.liveTest @pytest.mark.asyncio diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index 67df0cb0d376..8b0f6ead59b1 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -14,7 +14,7 @@ EventHubError, EventHubClient) -SLEEP = True +SLEEP = False @pytest.mark.liveTest def test_send_with_long_interval_sync(connstr_receivers): From dbf70018dba68692564a9911e3f5b266214473ab Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 09:30:19 -0700 Subject: [PATCH 22/54] add azure identity in dev_requirements --- sdk/eventhub/azure-eventhubs/dev_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/eventhub/azure-eventhubs/dev_requirements.txt b/sdk/eventhub/azure-eventhubs/dev_requirements.txt index fa716ae38ebe..e79b92edb6d2 100644 --- a/sdk/eventhub/azure-eventhubs/dev_requirements.txt +++ b/sdk/eventhub/azure-eventhubs/dev_requirements.txt @@ -1,5 +1,6 @@ -e ../../servicebus/azure-servicebus -e ../../core/azure-core +-e ../../core/azure-identity pytest>=3.4.1 pytest-asyncio>=0.8.0; python_version > '3.4' docutils>=0.14 From e7ff9519915d8dee8c3aa027a94364f65f57ecf8 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 09:38:56 -0700 Subject: [PATCH 23/54] put TransportType in __init__ directly --- .../azure-eventhubs/azure/eventhub/__init__.py | 7 +++---- .../azure-eventhubs/azure/eventhub/client_abstract.py | 9 ++++----- 
.../azure-eventhubs/azure/eventhub/constants.py | 11 ----------- 3 files changed, 7 insertions(+), 20 deletions(-) delete mode 100644 sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index e3eb7b2b5b38..0f238404cfb2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -11,12 +11,12 @@ from azure.eventhub.client import EventHubClient from azure.eventhub.sender import EventHubProducer from azure.eventhub.receiver import EventHubConsumer -from .constants import MessageSendResult -from .constants import TransportType +from uamqp import constants from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential +TransportType = constants.TransportType + __all__ = [ - "__version__", "EventData", "EventHubError", "ConnectError", @@ -28,7 +28,6 @@ "EventHubClient", "EventHubProducer", "EventHubConsumer", - "MessageSendResult", "TransportType", "EventHubSharedKeyCredential", "EventHubSASTokenCredential", diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 5febafb5cd16..984b18c6efb0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -27,11 +27,10 @@ from azure.eventhub import __version__ from azure.eventhub.configuration import Configuration -from azure.eventhub import constants from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address log = logging.getLogger(__name__) - +MAX_USER_AGENT_LENGTH = 512 def _parse_conn_str(conn_str): endpoint = None @@ -99,7 +98,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): """ Constructs a new EventHubClient. - :param host: The hostname URI string of the the Event Hub. 
+ :param host: The hostname of the the Event Hub. :type host: str :param event_hub_path: The path/name of the Event Hub :type event_hub_path: str @@ -306,10 +305,10 @@ def _create_properties(self, user_agent=None): # pylint: disable=no-self-use if user_agent: final_user_agent = '{}, {}'.format(final_user_agent, user_agent) - if len(final_user_agent) > constants.MAX_USER_AGENT_LENGTH: + if len(final_user_agent) > MAX_USER_AGENT_LENGTH: raise ValueError("The user-agent string cannot be more than {} in length." "Current user_agent string is: {} with length: {}".format( - constants.MAX_USER_AGENT_LENGTH, final_user_agent, len(final_user_agent))) + MAX_USER_AGENT_LENGTH, final_user_agent, len(final_user_agent))) properties["user-agent"] = final_user_agent return properties diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py deleted file mode 100644 index e71d3815f48f..000000000000 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/constants.py +++ /dev/null @@ -1,11 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-#-------------------------------------------------------------------------- - -from uamqp import constants - -MAX_USER_AGENT_LENGTH = 512 -TransportType = constants.TransportType -MessageSendResult = constants.MessageSendResult From 9ce3f8ea3edb94da4a7be247b134cf0522d14949 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 09:45:01 -0700 Subject: [PATCH 24/54] change EventData's offset to be str, not EventPosition --- .../azure-eventhubs/azure/eventhub/aio/receiver_async.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/common.py | 6 +++--- sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py index d4f7aa419495..c0d97c138f62 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -11,7 +11,7 @@ from uamqp import errors, types, compat from uamqp import ReceiveClientAsync, Source -from azure.eventhub import EventData +from azure.eventhub import EventData, EventPosition from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, ConnectionLostError, _error_handler log = logging.getLogger(__name__) @@ -100,7 +100,7 @@ async def __anext__(self): self.messages_iter = self._handler.receive_messages_iter_async() message = await self.messages_iter.__anext__() event_data = EventData(message=message) - self.offset = event_data.offset + self.offset = EventPosition(event_data.offset, inclusive=False) return event_data except errors.AuthenticationException as auth_error: if connecting_count < max_retries: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index 2f1cebefc3c7..da01ec8603d1 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -123,12 +123,12 @@ def sequence_number(self): @property def offset(self): """ - The position of the event data object. + The offset of the event data object. - :rtype: ~azure.eventhub.common.EventPosition + :rtype: str """ try: - return EventPosition(self._annotations[EventData.PROP_OFFSET].decode('UTF-8')) + return self._annotations[EventData.PROP_OFFSET].decode('UTF-8') except (KeyError, AttributeError): return None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index 9ec21f27e946..2362fe4a0ce3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -13,7 +13,7 @@ from uamqp import compat from uamqp import ReceiveClient, Source -from azure.eventhub.common import EventData +from azure.eventhub.common import EventData, EventPosition from azure.eventhub.error import EventHubError, AuthenticationError, ConnectError, ConnectionLostError, _error_handler @@ -97,7 +97,7 @@ def __next__(self): self.messages_iter = self._handler.receive_messages_iter() message = next(self.messages_iter) event_data = EventData(message=message) - self.offset = event_data.offset + self.offset = EventPosition(event_data.offset, inclusive=False) return event_data except errors.AuthenticationException as auth_error: if connecting_count < max_retries: From 95bba82a646847921b7ef91d5479cbca052f7a52 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 09:55:09 -0700 Subject: [PATCH 25/54] remove from_iot_connection_string --- .../azure/eventhub/client_abstract.py | 20 +++++++++++-------- .../azure-eventhubs/azure/eventhub/sender.py | 2 +- .../azure-eventhubs/examples/iothub_recv.py | 2 +- .../azure-eventhubs/examples/iothub_send.py | 2 +- .../examples/test_examples_eventhub.py | 2 +- .../asynctests/test_iothub_receive_async.py | 4 ++-- .../tests/test_iothub_receive.py | 2 +- 
.../azure-eventhubs/tests/test_iothub_send.py | 2 +- 8 files changed, 20 insertions(+), 16 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 984b18c6efb0..015bec880871 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -210,17 +210,21 @@ def from_connection_string(cls, conn_str, event_hub_path=None, **kwargs): :caption: Create an EventHubClient from a connection string. """ - address, policy, key, entity = _parse_conn_str(conn_str) - entity = event_hub_path or entity - left_slash_pos = address.find("//") - if left_slash_pos != -1: - host = address[left_slash_pos + 2:] + is_iot_conn_str = conn_str.lower().startswith("hostname") + if not is_iot_conn_str: + address, policy, key, entity = _parse_conn_str(conn_str) + entity = event_hub_path or entity + left_slash_pos = address.find("//") + if left_slash_pos != -1: + host = address[left_slash_pos + 2:] + else: + host = address + return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) else: - host = address - return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) + return cls._from_iothub_connection_string(conn_str, **kwargs) @classmethod - def from_iothub_connection_string(cls, conn_str, **kwargs): + def _from_iothub_connection_string(cls, conn_str, **kwargs): """ Create an EventHubClient from an IoTHub connection string. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index 74be7112c627..83d8550234e7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -321,7 +321,7 @@ def send(self, event_data, partition_key=None): :type event_data: ~azure.eventhub.common.EventData :param partition_key: With the given partition_key, event data will land to a particular partition of the Event Hub decided by the service. - :type batching_label: str + :type partition_key: str :raises: ~azure.eventhub.common.EventHubError if the message fails to send. :return: None diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py index d0c11c970dd8..dc49f599a399 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_recv.py @@ -18,7 +18,7 @@ iot_connection_str = 'HostName=iothubfortrack2py.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=glF9a2n0D9fgmWpfTqjjmvkYt0WaTNqZx9GV/UKwDkQ=' # os.environ['IOTHUB_CONNECTION_STR'] -client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) +client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) consumer = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') with consumer: received = consumer.receive(timeout=5) diff --git a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py index cc664a73ad45..06d35b102647 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/examples/iothub_send.py @@ -19,7 +19,7 @@ iot_device_id = os.environ['IOTHUB_DEVICE'] iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] -client = 
EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) +client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) try: producer = client.create_producer(operation='/messages/devicebound') with producer: diff --git a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py index 6b1552047fab..5231a76c8a87 100644 --- a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py +++ b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py @@ -38,7 +38,7 @@ def create_eventhub_client_from_iothub_connection_string(live_eventhub_config): from azure.eventhub import EventHubClient iot_connection_str = os.environ['IOTHUB_CONNECTION_STR'] - client = EventHubClient.from_iothub_connection_string(iot_connection_str) + client = EventHubClient.from_connection_string(iot_connection_str) # [END create_eventhub_client_iot_connstr] diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py index d443edafe2cb..f581f64584ab 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_iothub_receive_async.py @@ -24,7 +24,7 @@ async def pump(receiver, sleep=None): async def get_partitions(iot_connection_str): - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), prefetch=1000, operation='/messages/events') async with receiver: partitions = await client.get_properties() @@ -36,7 +36,7 @@ async def get_partitions(iot_connection_str): async def test_iothub_receive_multiple_async(iot_connection_str): 
pytest.skip("This will get AuthenticationError. We're investigating...") partitions = await get_partitions(iot_connection_str) - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receivers = [] for p in partitions: receivers.append(client.create_consumer(consumer_group="$default", partition_id=p, event_position=EventPosition("-1"), prefetch=10, operation='/messages/events')) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index ce060aecbb6d..ac5787b6b12e 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -14,7 +14,7 @@ @pytest.mark.liveTest def test_iothub_receive_sync(iot_connection_str, device_id): pytest.skip("current code will cause ErrorCodes.LinkRedirect") - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') receiver._open() try: diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py index c94f1f81c6c8..9660a79947bd 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_send.py @@ -16,7 +16,7 @@ @pytest.mark.liveTest def test_iothub_send_single_event(iot_connection_str, device_id): - client = EventHubClient.from_iothub_connection_string(iot_connection_str, network_tracing=False) + client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) sender = client.create_producer(operation='/messages/devicebound') try: 
sender.send(EventData(b"A single event", to_device=device_id)) From b3a6bb4f034527f3b7dc2c13cd053725a17cc931 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 09:56:21 -0700 Subject: [PATCH 26/54] small fix --- .../azure-eventhubs/tests/asynctests/test_reconnect_async.py | 2 +- sdk/eventhub/azure-eventhubs/tests/test_reconnect.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index b85135366712..7ce3afc047ba 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -15,7 +15,7 @@ EventHubError) from azure.eventhub.aio import EventHubClient -SLEEP = False +SLEEP = True @pytest.mark.liveTest @pytest.mark.asyncio diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index 8b0f6ead59b1..67df0cb0d376 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -14,7 +14,7 @@ EventHubError, EventHubClient) -SLEEP = False +SLEEP = True @pytest.mark.liveTest def test_send_with_long_interval_sync(connstr_receivers): From 437a6fed494ae188a01a76a47705aaf76904d5ff Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 10:00:51 -0700 Subject: [PATCH 27/54] docstring timeout from int to float --- .../azure-eventhubs/azure/eventhub/aio/receiver_async.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 2 +- .../azure-eventhubs/azure/eventhub/client_abstract.py | 6 +++--- sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py | 2 +- .../azure-eventhubs/azure/eventprocessorhost/eph.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py 
b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py index c0d97c138f62..3cf285106c62 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py @@ -345,7 +345,7 @@ async def receive(self, max_batch_size=None, timeout=None): from an Event Hub. Results will be returned after timeout. If combined with max_batch_size, it will return after either the count of received events reaches the max_batch_size or the operation has timed out. - :type timeout: int + :type timeout: float :rtype: list[~azure.eventhub.common.EventData] Example: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index f3fc234ca13d..3060d7dbd179 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -239,7 +239,7 @@ def create_producer(self, partition_id=None, operation=None, send_timeout=None): :type operation: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: int + :type send_timeout: float :rtype: ~azure.eventhub.sender.EventHubProducer Example: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 015bec880871..0b9f0a2dda69 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -115,7 +115,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): :type http_proxy: dict[str, Any] :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. 
- :type auth_timeout: int + :type auth_timeout: float :param user_agent: The user agent that needs to be appended to the built in user agent string. :type user_agent: str :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default @@ -132,10 +132,10 @@ def __init__(self, host, event_hub_path, credential, **kwargs): :type max_batch_size: int :param receive_timeout: The timeout time in seconds to receive a batch of events from an Event Hub. Default value is 0 seconds. - :type receive_timeout: int + :type receive_timeout: float :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: int + :type send_timeout: float """ self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] self.address = _Address() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py index 2362fe4a0ce3..661ce983efb9 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py @@ -363,7 +363,7 @@ def receive(self, max_batch_size=None, timeout=None): from an Event Hub. Results will be returned after timeout. If combined with max_batch_size, it will return after either the count of received events reaches the max_batch_size or the operation has timed out. 
- :type timeout: int + :type timeout: float :rtype: list[~azure.eventhub.common.EventData] Example: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eph.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eph.py index 2e464e10235c..90200d4698fd 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eph.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/eph.py @@ -75,7 +75,7 @@ class EPHOptions: :vartype prefetch_count: int :ivar receive_timeout: The length of time a single partition receiver will wait in order to receive a batch of events. Default is 60 seconds. - :vartype receive_timeout: int + :vartype receive_timeout: float :ivar release_pump_on_timeout: Whether to shutdown an individual partition receiver if no events were received in the specified timeout. Shutting down the pump will release the lease to allow it to be picked up by another host. Default is False. From 168e522fa0a81eca25ea1735328fb2a32204fe8c Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 10:05:37 -0700 Subject: [PATCH 28/54] fix TransportType import --- sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index 72211d69076e..261d760e1569 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -from .constants import TransportType +from azure.eventhub import TransportType class Configuration(object): From 17c748f150c75bd2122884fd3a99159aede1b741 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 11:12:01 -0700 Subject: [PATCH 29/54] add pytest option sleep for reconnect test --- sdk/eventhub/azure-eventhubs/conftest.py | 12 ++++++++++++ .../tests/asynctests/test_reconnect_async.py | 15 +++++++-------- .../azure-eventhubs/tests/test_reconnect.py | 16 +++++++--------- 3 files changed, 26 insertions(+), 17 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index 422b9dd20521..cf1881f76ef7 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -33,6 +33,18 @@ from azure.eventhub import EventHubClient, EventHubConsumer, EventPosition +def pytest_addoption(parser): + parser.addoption( + "--sleep", action="store", default="True", help="sleep on reconnect test: True or False" + ) + + +@pytest.fixture +def sleep(request): + sleep = request.config.getoption("--sleep") + return sleep.lower() in ('true', 'yes', '1', 'y') + + def get_logger(filename, level=logging.INFO): azure_logger = logging.getLogger("azure.eventhub") azure_logger.setLevel(level) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py index 7ce3afc047ba..91357e2553b9 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_reconnect_async.py @@ -15,18 +15,17 @@ EventHubError) from azure.eventhub.aio import EventHubClient -SLEEP = True @pytest.mark.liveTest @pytest.mark.asyncio -async def test_send_with_long_interval_async(connstr_receivers): +async def test_send_with_long_interval_async(connstr_receivers, 
sleep): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_producer() try: await sender.send(EventData(b"A single event")) for _ in range(1): - if SLEEP: + if sleep: await asyncio.sleep(300) else: sender._handler._connection._conn.destroy() @@ -36,7 +35,7 @@ async def test_send_with_long_interval_async(connstr_receivers): received = [] for r in receivers: - if not SLEEP: # if sender sleeps, the receivers will be disconnected. destroy connection to simulate + if not sleep: # if sender sleeps, the receivers will be disconnected. destroy connection to simulate r._handler._connection._conn.destroy() received.extend(r.receive(timeout=1)) assert len(received) == 2 @@ -56,20 +55,20 @@ def pump(receiver): @pytest.mark.liveTest @pytest.mark.asyncio -async def test_send_with_forced_conn_close_async(connstr_receivers): +async def test_send_with_forced_conn_close_async(connstr_receivers, sleep): pytest.skip("This test is similar to the above one") connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_producer() try: await sender.send(EventData(b"A single event")) - if SLEEP: + if sleep: await asyncio.sleep(300) else: sender._handler._connection._conn.destroy() await sender.send(EventData(b"A single event")) await sender.send(EventData(b"A single event")) - if SLEEP: + if sleep: await asyncio.sleep(300) else: sender._handler._connection._conn.destroy() @@ -80,7 +79,7 @@ async def test_send_with_forced_conn_close_async(connstr_receivers): received = [] for r in receivers: - if not SLEEP: + if not sleep: r._handler._connection._conn.destroy() received.extend(pump(r)) assert len(received) == 5 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py index 67df0cb0d376..223a759ea9c5 100644 --- 
a/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_reconnect.py @@ -14,17 +14,15 @@ EventHubError, EventHubClient) -SLEEP = True - @pytest.mark.liveTest -def test_send_with_long_interval_sync(connstr_receivers): +def test_send_with_long_interval_sync(connstr_receivers, sleep): connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) sender = client.create_producer() with sender: sender.send(EventData(b"A single event")) for _ in range(1): - if SLEEP: + if sleep: time.sleep(300) else: sender._handler._connection._conn.destroy() @@ -32,7 +30,7 @@ def test_send_with_long_interval_sync(connstr_receivers): received = [] for r in receivers: - if not SLEEP: + if not sleep: r._handler._connection._conn.destroy() received.extend(r.receive(timeout=3)) @@ -41,7 +39,7 @@ def test_send_with_long_interval_sync(connstr_receivers): @pytest.mark.liveTest -def test_send_with_forced_conn_close_sync(connstr_receivers): +def test_send_with_forced_conn_close_sync(connstr_receivers, sleep): pytest.skip("This test is similar to the above one") connection_str, receivers = connstr_receivers client = EventHubClient.from_connection_string(connection_str, network_tracing=False) @@ -49,13 +47,13 @@ def test_send_with_forced_conn_close_sync(connstr_receivers): with sender: sender.send(EventData(b"A single event")) sender._handler._connection._conn.destroy() - if SLEEP: + if sleep: time.sleep(300) else: sender._handler._connection._conn.destroy() sender.send(EventData(b"A single event")) sender.send(EventData(b"A single event")) - if SLEEP: + if sleep: time.sleep(300) else: sender._handler._connection._conn.destroy() @@ -64,7 +62,7 @@ def test_send_with_forced_conn_close_sync(connstr_receivers): received = [] for r in receivers: - if not SLEEP: + if not sleep: r._handler._connection._conn.destroy() received.extend(r.receive(timeout=1)) assert len(received) == 5 From 
cb1017f1e28540a40bfc046f894261d9f20a47a4 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 11:18:29 -0700 Subject: [PATCH 30/54] Fix eventposition issue in receive test --- .../azure-eventhubs/tests/asynctests/test_receive_async.py | 4 ++-- sdk/eventhub/azure-eventhubs/tests/test_receive.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py index 3d0cf38ca743..ae696bf469b5 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_receive_async.py @@ -44,7 +44,7 @@ async def test_receive_with_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=offset) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=False)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -68,7 +68,7 @@ async def test_receive_with_inclusive_offset_async(connstr_senders): assert len(received) == 1 offset = received[0].offset - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) async with offset_receiver: received = await offset_receiver.receive(timeout=5) assert len(received) == 1 diff --git a/sdk/eventhub/azure-eventhubs/tests/test_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_receive.py index 5b79c8c7ff06..35c5e39c992b 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_receive.py 
@@ -70,7 +70,7 @@ def test_receive_with_offset_sync(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=offset) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=False)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 0 @@ -97,7 +97,7 @@ def test_receive_with_inclusive_offset(connstr_senders): assert list(received[0].body) == [b'Data'] assert received[0].body_as_str() == "Data" - offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset.value, inclusive=True)) + offset_receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition(offset, inclusive=True)) with offset_receiver: received = offset_receiver.receive(timeout=5) assert len(received) == 1 From 1288a8bb5cf308e32afed24697f0d1751f44f950 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 11:19:11 -0700 Subject: [PATCH 31/54] fix constants import issue --- .../azure-eventhubs/azure/eventhub/aio/sender_async.py | 4 +--- sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py index ec297d41ee3f..5398186d0e39 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py @@ -10,8 +10,6 @@ from uamqp import constants, errors, compat from uamqp import SendClientAsync -from azure.eventhub import MessageSendResult -from azure.eventhub import EventHubError from 
azure.eventhub.common import EventData, _BatchSendEventData from azure.eventhub.error import EventHubError, ConnectError, \ AuthenticationError, EventDataError, EventDataSendError, ConnectionLostError, _error_handler @@ -368,5 +366,5 @@ def _on_outcome(self, outcome, condition): @staticmethod def _error(outcome, condition): - if outcome != MessageSendResult.Ok: + if outcome != constants.MessageSendResult.Ok: raise condition diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index 261d760e1569..ba8aee6249b0 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -from azure.eventhub import TransportType +from uamqp.constants import TransportType class Configuration(object): diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py index 83d8550234e7..b3b26293a44f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py @@ -12,7 +12,6 @@ from uamqp import constants, errors from uamqp import compat from uamqp import SendClient -from uamqp.constants import MessageSendResult from azure.eventhub.common import EventData, _BatchSendEventData from azure.eventhub.error import EventHubError, ConnectError, \ @@ -363,5 +362,5 @@ def _on_outcome(self, outcome, condition): @staticmethod def _error(outcome, condition): - if outcome != MessageSendResult.Ok: + if outcome != constants.MessageSendResult.Ok: raise condition From f03fee11626925cf25500a8554cef56d2f845458 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 11:22:51 -0700 Subject: [PATCH 32/54] remove azure 
identity dev_req --- sdk/eventhub/azure-eventhubs/dev_requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/dev_requirements.txt b/sdk/eventhub/azure-eventhubs/dev_requirements.txt index e79b92edb6d2..fa716ae38ebe 100644 --- a/sdk/eventhub/azure-eventhubs/dev_requirements.txt +++ b/sdk/eventhub/azure-eventhubs/dev_requirements.txt @@ -1,6 +1,5 @@ -e ../../servicebus/azure-servicebus -e ../../core/azure-core --e ../../core/azure-identity pytest>=3.4.1 pytest-asyncio>=0.8.0; python_version > '3.4' docutils>=0.14 From 980b4f0181e680e72024bbf9aa07deb416e28d55 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 11:26:42 -0700 Subject: [PATCH 33/54] fix some example issues --- sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py | 2 +- sdk/eventhub/azure-eventhubs/examples/proxy.py | 2 +- sdk/eventhub/azure-eventhubs/examples/recv.py | 2 +- sdk/eventhub/azure-eventhubs/examples/recv_batch.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py index fe20fc8503dd..925e16fd0629 100644 --- a/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py +++ b/sdk/eventhub/azure-eventhubs/examples/iterator_receiver.py @@ -22,7 +22,7 @@ USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -EVENT_POSITION = EventPosition.first_available_event() +EVENT_POSITION = EventPosition("-1") class PartitionConsumerThread(Thread): diff --git a/sdk/eventhub/azure-eventhubs/examples/proxy.py b/sdk/eventhub/azure-eventhubs/examples/proxy.py index e8d7e3fff9ad..0af2dda1a6ac 100644 --- a/sdk/eventhub/azure-eventhubs/examples/proxy.py +++ b/sdk/eventhub/azure-eventhubs/examples/proxy.py @@ -24,7 +24,7 @@ USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -EVENT_POSITION = EventPosition.first_available_event() +EVENT_POSITION = 
EventPosition("-1") PARTITION = "0" HTTP_PROXY = { 'proxy_hostname': '127.0.0.1', # proxy hostname diff --git a/sdk/eventhub/azure-eventhubs/examples/recv.py b/sdk/eventhub/azure-eventhubs/examples/recv.py index 7f78a8a5ad51..df9a8e4d687a 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv.py @@ -22,7 +22,7 @@ USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -EVENT_POSITION = EventPosition.first_available_event() +EVENT_POSITION = EventPosition("-1") PARTITION = "0" diff --git a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py index 0b37769d0242..9b9edcd03a84 100644 --- a/sdk/eventhub/azure-eventhubs/examples/recv_batch.py +++ b/sdk/eventhub/azure-eventhubs/examples/recv_batch.py @@ -24,7 +24,7 @@ USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') -EVENT_POSITION = EventPosition.first_available_event() +EVENT_POSITION = EventPosition("-1") PARTITION = "0" @@ -38,7 +38,7 @@ with consumer: batched_events = consumer.receive(max_batch_size=10) for event_data in batched_events: - last_offset = event_data.offset.value + last_offset = event_data.offset last_sn = event_data.sequence_number total += 1 print("Partition {}, Received {}, sn={} offset={}".format( From 4e2a6e1e9485d2ce41c05cf0eac0ea594ea73315 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 11:32:16 -0700 Subject: [PATCH 34/54] iot string fix --- sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index 0b9f0a2dda69..afb58280442f 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -210,7 +210,7 @@ def 
from_connection_string(cls, conn_str, event_hub_path=None, **kwargs): :caption: Create an EventHubClient from a connection string. """ - is_iot_conn_str = conn_str.lower().startswith("hostname") + is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") if not is_iot_conn_str: address, policy, key, entity = _parse_conn_str(conn_str) entity = event_hub_path or entity From 7a5aa4d2ce4b7cdaca6aafa358643a362708ec30 Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 12:14:54 -0700 Subject: [PATCH 35/54] Change filename for consumer and producer --- sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py | 4 ++-- sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py | 6 +++--- .../aio/{event_hubs_client_async.py => client_async.py} | 4 ++-- .../eventhub/aio/{receiver_async.py => consumer_async.py} | 0 .../eventhub/aio/{sender_async.py => producer_async.py} | 0 sdk/eventhub/azure-eventhubs/azure/eventhub/client.py | 4 ++-- .../azure/eventhub/{receiver.py => consumer.py} | 0 .../azure/eventhub/{sender.py => producer.py} | 0 8 files changed, 9 insertions(+), 9 deletions(-) rename sdk/eventhub/azure-eventhubs/azure/eventhub/aio/{event_hubs_client_async.py => client_async.py} (99%) rename sdk/eventhub/azure-eventhubs/azure/eventhub/aio/{receiver_async.py => consumer_async.py} (100%) rename sdk/eventhub/azure-eventhubs/azure/eventhub/aio/{sender_async.py => producer_async.py} (100%) rename sdk/eventhub/azure-eventhubs/azure/eventhub/{receiver.py => consumer.py} (100%) rename sdk/eventhub/azure-eventhubs/azure/eventhub/{sender.py => producer.py} (100%) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py index 0f238404cfb2..f6722e7bd540 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/__init__.py @@ -9,8 +9,8 @@ from azure.eventhub.error import EventHubError, EventDataError, ConnectError, \ AuthenticationError, 
EventDataSendError, ConnectionLostError from azure.eventhub.client import EventHubClient -from azure.eventhub.sender import EventHubProducer -from azure.eventhub.receiver import EventHubConsumer +from azure.eventhub.producer import EventHubProducer +from azure.eventhub.consumer import EventHubConsumer from uamqp import constants from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py index f0361bdc038b..b2951e5a02eb 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/__init__.py @@ -1,6 +1,6 @@ -from .event_hubs_client_async import EventHubClient -from .receiver_async import EventHubConsumer -from .sender_async import EventHubProducer +from .client_async import EventHubClient +from .consumer_async import EventHubConsumer +from .producer_async import EventHubProducer __all__ = [ "EventHubClient", diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py similarity index 99% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index 5e7d38be2fe4..dec943455ee2 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/event_hubs_client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -19,8 +19,8 @@ from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential from ..client_abstract import EventHubClientAbstract -from .sender_async import EventHubProducer -from .receiver_async import EventHubConsumer +from .producer_async import EventHubProducer +from .consumer_async import EventHubConsumer log = logging.getLogger(__name__) diff --git 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py similarity index 100% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/aio/receiver_async.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py similarity index 100% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/aio/sender_async.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 3060d7dbd179..f07723cf970e 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -19,8 +19,8 @@ from uamqp import authentication from uamqp import constants -from azure.eventhub.sender import EventHubProducer -from azure.eventhub.receiver import EventHubConsumer +from azure.eventhub.producer import EventHubProducer +from azure.eventhub.consumer import EventHubConsumer from azure.eventhub.common import parse_sas_token, EventPosition from .client_abstract import EventHubClientAbstract from .common import EventHubSASTokenCredential, EventHubSharedKeyCredential diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py similarity index 100% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/receiver.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py similarity index 100% rename from sdk/eventhub/azure-eventhubs/azure/eventhub/sender.py rename to sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py From 
613a6c41c7bad8df53eb429ae412b34031888351 Mon Sep 17 00:00:00 2001 From: Yunhao Ling <47871814+yunhaoling@users.noreply.github.com> Date: Fri, 21 Jun 2019 14:01:57 -0700 Subject: [PATCH 36/54] Vendor azure-storage-blob in eventprocesshost (#6018) * Update storage dependency * Vendor storage blob v1.3.1 in eph --- .../azure_storage_checkpoint_manager.py | 2 +- .../eventprocessorhost/vendor/__init__.py | 1 + .../vendor/storage/__init__.py | 1 + .../vendor/storage/blob/__init__.py | 31 + .../vendor/storage/blob/_constants.py | 14 + .../vendor/storage/blob/_deserialization.py | 452 +++ .../vendor/storage/blob/_download_chunking.py | 178 + .../vendor/storage/blob/_encryption.py | 187 + .../vendor/storage/blob/_error.py | 29 + .../vendor/storage/blob/_serialization.py | 118 + .../vendor/storage/blob/_upload_chunking.py | 496 +++ .../vendor/storage/blob/appendblobservice.py | 661 ++++ .../vendor/storage/blob/baseblobservice.py | 3280 +++++++++++++++++ .../vendor/storage/blob/blockblobservice.py | 1063 ++++++ .../vendor/storage/blob/models.py | 781 ++++ .../vendor/storage/blob/pageblobservice.py | 1394 +++++++ .../storage/blob/sharedaccesssignature.py | 180 + .../vendor/storage/common/__init__.py | 38 + .../vendor/storage/common/_auth.py | 117 + .../storage/common/_common_conversion.py | 126 + .../vendor/storage/common/_connection.py | 160 + .../vendor/storage/common/_constants.py | 47 + .../vendor/storage/common/_deserialization.py | 384 ++ .../vendor/storage/common/_encryption.py | 233 ++ .../vendor/storage/common/_error.py | 183 + .../vendor/storage/common/_http/__init__.py | 74 + .../vendor/storage/common/_http/httpclient.py | 107 + .../vendor/storage/common/_serialization.py | 371 ++ .../storage/common/cloudstorageaccount.py | 200 + .../vendor/storage/common/models.py | 672 ++++ .../vendor/storage/common/retry.py | 306 ++ .../storage/common/sharedaccesssignature.py | 217 ++ .../vendor/storage/common/storageclient.py | 391 ++ .../vendor/storage/common/tokencredential.py 
| 48 + sdk/eventhub/azure-eventhubs/setup.py | 4 +- 35 files changed, 12544 insertions(+), 2 deletions(-) create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_deserialization.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_download_chunking.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_encryption.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_error.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_serialization.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_upload_chunking.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py create mode 100644 
sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_common_conversion.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_constants.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_deserialization.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_encryption.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_error.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/httpclient.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_serialization.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/models.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/tokencredential.py diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py index 05440824f23b..18acb52db82a 100644 
--- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/azure_storage_checkpoint_manager.py @@ -12,7 +12,7 @@ import asyncio import requests -from azure.storage.blob import BlockBlobService +from .vendor.storage.blob import BlockBlobService from azure.eventprocessorhost.azure_blob_lease import AzureBlobLease from azure.eventprocessorhost.checkpoint import Checkpoint from azure.eventprocessorhost.abstract_lease_manager import AbstractLeaseManager diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py new file mode 100644 index 000000000000..de40ea7ca058 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/__init__.py new file mode 100644 index 000000000000..eb3e5d0fde33 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/__init__.py @@ -0,0 +1,31 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from .appendblobservice import AppendBlobService +from .blockblobservice import BlockBlobService +from .models import ( + Container, + ContainerProperties, + Blob, + BlobProperties, + BlobBlock, + BlobBlockList, + PageRange, + ContentSettings, + CopyProperties, + ContainerPermissions, + BlobPermissions, + _LeaseActions, + AppendBlockProperties, + PageBlobProperties, + ResourceProperties, + Include, + SequenceNumberAction, + BlockListType, + PublicAccess, + BlobPrefix, + DeleteSnapshot, +) +from .pageblobservice import PageBlobService diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py new file mode 100644 index 000000000000..b450d83e430d --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py @@ -0,0 +1,14 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +__author__ = 'Microsoft Corp. ' +__version__ = '1.3.1' + +# x-ms-version for storage service. 
X_MS_VERSION = '2018-03-28'

# internal configurations, should not be changed
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024

# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# blob/_deserialization.py
#
# Converts HTTP responses and XML list bodies from the Blob service into the
# model objects declared in .models. All helpers are internal to the vendored
# storage client.
from azure.common import AzureException
from dateutil import parser

try:
    from xml.etree import cElementTree as ETree
except ImportError:
    from xml.etree import ElementTree as ETree

from ..common._common_conversion import (
    _decode_base64_to_text,
    _to_str,
    _get_content_md5
)
from ..common._deserialization import (
    _parse_properties,
    _to_int,
    _parse_metadata,
    _convert_xml_to_signed_identifiers,
    _bool,
)
from .models import (
    Container,
    Blob,
    BlobBlock,
    BlobBlockList,
    BlobBlockState,
    BlobProperties,
    PageRange,
    ContainerProperties,
    AppendBlockProperties,
    PageBlobProperties,
    ResourceProperties,
    BlobPrefix,
    AccountInformation,
)
from ._encryption import _decrypt_blob
from ..common.models import _list
from ..common._error import (
    _validate_content_match,
    _ERROR_DECRYPTION_FAILURE,
)


def _parse_base_properties(response):
    '''
    Extracts the basic resource headers (Last-Modified, ETag) from a response.

    :param response: the raw HTTP response object; headers is a dict-like.
    :return: a ResourceProperties with last_modified (datetime) and etag set.
    '''
    resource_properties = ResourceProperties()
    resource_properties.last_modified = parser.parse(response.headers.get('last-modified'))
    resource_properties.etag = response.headers.get('etag')

    return resource_properties


def _parse_page_properties(response):
    '''
    Extracts page-write response headers, including the page blob's
    x-ms-blob-sequence-number, into a PageBlobProperties.
    '''
    put_page = PageBlobProperties()
    put_page.last_modified = parser.parse(response.headers.get('last-modified'))
    put_page.etag = response.headers.get('etag')
    put_page.sequence_number = _to_int(response.headers.get('x-ms-blob-sequence-number'))

    return put_page


def _parse_append_block(response):
    '''
    Extracts Append Block response headers (append offset and committed block
    count) into an AppendBlockProperties.
    '''
    append_block = AppendBlockProperties()
    append_block.last_modified = parser.parse(response.headers.get('last-modified'))
    append_block.etag = response.headers.get('etag')
    append_block.append_offset = _to_int(response.headers.get('x-ms-blob-append-offset'))
    append_block.committed_block_count = _to_int(response.headers.get('x-ms-blob-committed-block-count'))

    return append_block


def _parse_snapshot_blob(response, name):
    '''
    Extracts the x-ms-snapshot header from a Snapshot Blob response and
    returns the parsed Blob carrying that snapshot value.
    '''
    snapshot = response.headers.get('x-ms-snapshot')

    return _parse_blob(response, name, snapshot)


def _parse_lease(response):
    '''
    Extracts lease headers into a dict with keys 'time' (int or None) and
    'id' (str or None).
    '''
    lease = {'time': response.headers.get('x-ms-lease-time')}
    if lease['time']:
        # header is a string; normalize to int when present
        lease['time'] = _to_int(lease['time'])

    lease['id'] = response.headers.get('x-ms-lease-id')

    return lease


def _parse_blob(response, name, snapshot, validate_content=False, require_encryption=False,
                key_encryption_key=None, key_resolver_function=None, start_offset=None, end_offset=None):
    '''
    Builds a Blob model from a Get Blob response: metadata, properties,
    optional MD5 validation, and optional client-side decryption of the body.

    :param response: the raw HTTP response (None returns None).
    :param str name: blob name to carry on the model.
    :param str snapshot: snapshot identifier, if any.
    :param bool validate_content: verify the body against the content-md5 header.
    :param bool require_encryption: fail decryption instead of returning
        plaintext when the blob carries no encryption metadata.
    :param key_encryption_key: user-provided KEK; triggers decryption when set.
    :param key_resolver_function: kid -> KEK resolver; triggers decryption when set.
    :param start_offset: bytes to trim from the decrypted front (range gets).
    :param end_offset: bytes to trim from the decrypted back (range gets).
    :return: a Blob, or None if response is None.
    '''
    if response is None:
        return None

    metadata = _parse_metadata(response)
    props = _parse_properties(response, BlobProperties)

    # For range gets, only look at 'x-ms-blob-content-md5' for overall MD5
    content_settings = getattr(props, 'content_settings')
    if 'content-range' in response.headers:
        if 'x-ms-blob-content-md5' in response.headers:
            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-blob-content-md5']))
        else:
            # range get without whole-blob MD5: drop the partial-range MD5
            delattr(content_settings, 'content_md5')

    if validate_content:
        computed_md5 = _get_content_md5(response.body)
        _validate_content_match(response.headers['content-md5'], computed_md5)

    if key_encryption_key is not None or key_resolver_function is not None:
        try:
            response.body = _decrypt_blob(require_encryption, key_encryption_key, key_resolver_function,
                                          response, start_offset, end_offset)
        # NOTE(review): bare except collapses every decryption failure
        # (including KeyboardInterrupt on py2-era semantics) into a generic
        # AzureException, discarding the original cause.
        except:
            raise AzureException(_ERROR_DECRYPTION_FAILURE)

    return Blob(name, snapshot, response.body, props, metadata)


def _parse_container(response, name):
    '''
    Builds a Container model (name, properties, metadata) from a container
    response; returns None if response is None.
    '''
    if response is None:
        return None

    metadata = _parse_metadata(response)
    props = _parse_properties(response, ContainerProperties)
    return Container(name, props, metadata)


def _convert_xml_to_signed_identifiers_and_access(response):
    # Delegates signed-identifier parsing to the shared helper, then attaches
    # the container-level x-ms-blob-public-access header to the ACL.
    acl = _convert_xml_to_signed_identifiers(response)
    acl.public_access = response.headers.get('x-ms-blob-public-access')

    return acl


def _convert_xml_to_containers(response):
    '''
    Converts a List Containers EnumerationResults XML body into a _list of
    Container models. The returned list also carries a 'next_marker'
    attribute for continuation.

    Each <Container> element contributes Name, optional <Metadata> children,
    and <Properties> (Etag, Last-Modified, LeaseStatus, LeaseState,
    LeaseDuration, PublicAccess, HasImmutabilityPolicy, HasLegalHold).

    :return: _list of Container, or None when there is no body to parse.
    '''
    if response is None or response.body is None:
        return None

    containers = _list()
    list_element = ETree.fromstring(response.body)

    # Set next marker
    setattr(containers, 'next_marker', list_element.findtext('NextMarker'))

    containers_element = list_element.find('Containers')

    for container_element in containers_element.findall('Container'):
        # Name element
        container = Container()
        container.name = container_element.findtext('Name')

        # Metadata
        metadata_root_element = container_element.find('Metadata')
        if metadata_root_element is not None:
            container.metadata = dict()
            for metadata_element in metadata_root_element:
                container.metadata[metadata_element.tag] = metadata_element.text

        # Properties
        properties_element = container_element.find('Properties')
        container.properties.etag = properties_element.findtext('Etag')
        container.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
        container.properties.lease_status = properties_element.findtext('LeaseStatus')
        container.properties.lease_state = properties_element.findtext('LeaseState')
        container.properties.lease_duration = properties_element.findtext('LeaseDuration')
        container.properties.public_access = properties_element.findtext('PublicAccess')
        # NOTE(review): the two flags below are stored as the raw XML text
        # ('true'/'false'), not converted with _bool as the blob list is —
        # confirm whether callers expect strings here.
        container.properties.has_immutability_policy = properties_element.findtext('HasImmutabilityPolicy')
        container.properties.has_legal_hold = properties_element.findtext('HasLegalHold')

        # Add container to list
        containers.append(container)

    return containers


# Maps a <Properties> child tag from the List Blobs XML to
# (nested-attribute-or-None, model attribute name, converter). A None first
# element means the attribute lives directly on BlobProperties; otherwise it
# lives on the named sub-object (content_settings / lease / copy).
LIST_BLOBS_ATTRIBUTE_MAP = {
    'Last-Modified': (None, 'last_modified', parser.parse),
    'Etag': (None, 'etag', _to_str),
    'x-ms-blob-sequence-number': (None, 'sequence_number', _to_int),
    'BlobType': (None, 'blob_type', _to_str),
    'Content-Length': (None, 'content_length', _to_int),
    'ServerEncrypted': (None, 'server_encrypted', _bool),
    'Content-Type': ('content_settings', 'content_type', _to_str),
    'Content-Encoding': ('content_settings', 'content_encoding', _to_str),
    'Content-Disposition': ('content_settings', 'content_disposition', _to_str),
    'Content-Language': ('content_settings', 'content_language', _to_str),
    'Content-MD5': ('content_settings', 'content_md5', _to_str),
    'Cache-Control': ('content_settings', 'cache_control', _to_str),
    'LeaseStatus': ('lease', 'status', _to_str),
    'LeaseState': ('lease', 'state', _to_str),
    'LeaseDuration': ('lease', 'duration', _to_str),
    'CopyId': ('copy', 'id', _to_str),
    'CopySource': ('copy', 'source', _to_str),
    'CopyStatus': ('copy', 'status', _to_str),
    'CopyProgress': ('copy', 'progress', _to_str),
    'CopyCompletionTime': ('copy', 'completion_time', _to_str),
    'CopyStatusDescription': ('copy', 'status_description', _to_str),
    'AccessTier': (None, 'blob_tier', _to_str),
    'AccessTierChangeTime': (None, 'blob_tier_change_time', parser.parse),
    'AccessTierInferred': (None, 'blob_tier_inferred', _bool),
    'ArchiveStatus': (None, 'rehydration_status', _to_str),
    'DeletedTime': (None, 'deleted_time', parser.parse),
    'RemainingRetentionDays': (None, 'remaining_retention_days', _to_int),
    'Creation-Time': (None, 'creation_time', parser.parse),
}


def _convert_xml_to_blob_list(response):
    '''
    Converts a List Blobs EnumerationResults XML body into a _list mixing
    BlobPrefix entries (delimiter listings) and Blob entries. The returned
    list carries a 'next_marker' attribute for continuation.

    For each <Blob>, properties are dispatched through
    LIST_BLOBS_ATTRIBUTE_MAP; unrecognized property tags are attached to
    blob.properties verbatim as strings.

    :return: _list of BlobPrefix/Blob, or None when there is no body.
    '''
    if response is None or response.body is None:
        return None

    blob_list = _list()
    list_element = ETree.fromstring(response.body)

    setattr(blob_list, 'next_marker', list_element.findtext('NextMarker'))

    blobs_element = list_element.find('Blobs')
    blob_prefix_elements = blobs_element.findall('BlobPrefix')
    if blob_prefix_elements is not None:
        for blob_prefix_element in blob_prefix_elements:
            prefix = BlobPrefix()
            prefix.name = blob_prefix_element.findtext('Name')
            blob_list.append(prefix)

    for blob_element in blobs_element.findall('Blob'):
        blob = Blob()
        blob.name = blob_element.findtext('Name')
        blob.snapshot = blob_element.findtext('Snapshot')

        deleted = blob_element.findtext('Deleted')
        if deleted:
            blob.deleted = _bool(deleted)

        # Properties
        properties_element = blob_element.find('Properties')
        if properties_element is not None:
            for property_element in properties_element:
                info = LIST_BLOBS_ATTRIBUTE_MAP.get(property_element.tag)
                if info is None:
                    # unknown tag: keep it as a raw string attribute
                    setattr(blob.properties, property_element.tag, _to_str(property_element.text))
                elif info[0] is None:
                    # attribute lives directly on BlobProperties
                    setattr(blob.properties, info[1], info[2](property_element.text))
                else:
                    # attribute lives on a nested sub-object
                    attr = getattr(blob.properties, info[0])
                    setattr(attr, info[1], info[2](property_element.text))

        # Metadata
        metadata_root_element = blob_element.find('Metadata')
        if metadata_root_element is not None:
            blob.metadata = dict()
            for metadata_element in metadata_root_element:
                blob.metadata[metadata_element.tag] = metadata_element.text

        # Add blob to list
        blob_list.append(blob)

    return blob_list


def _convert_xml_to_block_list(response):
    '''
    Converts a Get Block List XML body (<BlockList> with <CommittedBlocks>
    and <UncommittedBlocks>, each holding <Block><Name/><Size/></Block>
    entries) into a BlobBlockList. Block names are base64-decoded.

    :return: BlobBlockList, or None when there is no body.
    '''
    if response is None or response.body is None:
        return None

    block_list = BlobBlockList()

    list_element = ETree.fromstring(response.body)

    committed_blocks_element = list_element.find('CommittedBlocks')
    if committed_blocks_element is not None:
        for block_element in committed_blocks_element.findall('Block'):
            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
            block_size = int(block_element.findtext('Size'))
            block = BlobBlock(id=block_id, state=BlobBlockState.Committed)
            block._set_size(block_size)
            block_list.committed_blocks.append(block)

    uncommitted_blocks_element = list_element.find('UncommittedBlocks')
    if uncommitted_blocks_element is not None:
        for block_element in uncommitted_blocks_element.findall('Block'):
            block_id = _decode_base64_to_text(block_element.findtext('Name', ''))
            block_size = int(block_element.findtext('Size'))
            block = BlobBlock(id=block_id, state=BlobBlockState.Uncommitted)
            block._set_size(block_size)
            block_list.uncommitted_blocks.append(block)

    return block_list


def _convert_xml_to_page_ranges(response):
    '''
    Converts a Get Page Ranges XML body (<PageList> containing <PageRange>
    and <ClearRange> elements, each with <Start> and <End>) into a list of
    PageRange models; ClearRange entries are flagged is_cleared=True.

    :return: list of PageRange, or None when there is no body.
    '''
    if response is None or response.body is None:
        return None

    page_list = list()

    list_element = ETree.fromstring(response.body)

    for page_range_element in list_element:
        if page_range_element.tag == 'PageRange':
            is_cleared = False
        elif page_range_element.tag == 'ClearRange':
            is_cleared = True
        else:
            # NOTE(review): an unrecognized tag still falls through to the
            # append below, reusing the previous iteration's is_cleared (or
            # raising NameError on the first element) and calling int() on a
            # possibly-missing <Start>/<End>. A 'continue' here looks intended.
            pass  # ignore any unrecognized Page Range types

        page_list.append(
            PageRange(
                int(page_range_element.findtext('Start')),
                int(page_range_element.findtext('End')),
                is_cleared
            )
        )

    return page_list


def _parse_account_information(response):
    '''
    Extracts x-ms-sku-name and x-ms-account-kind headers into an
    AccountInformation model.
    '''
    account_info = AccountInformation()
    account_info.sku_name = response.headers['x-ms-sku-name']
    account_info.account_kind = response.headers['x-ms-account-kind']

    return account_info
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# blob/_download_chunking.py
#
# Chunked blob download: a sequential downloader for max_connections == 1
# and a lock-protected parallel downloader for max_connections > 1.
import threading


def _download_blob_chunks(blob_service, container_name, blob_name, snapshot,
                          download_size, block_size, progress, start_range, end_range,
                          stream, max_connections, progress_callback, validate_content,
                          lease_id, if_modified_since, if_unmodified_since, if_match,
                          if_none_match, timeout, operation_context):
    # Entry point: picks the parallel or sequential downloader and drains
    # every chunk offset through it, writing into `stream`.

    downloader_class = _ParallelBlobChunkDownloader if max_connections > 1 else _SequentialBlobChunkDownloader

    downloader = downloader_class(
        blob_service,
        container_name,
        blob_name,
        snapshot,
        download_size,
        block_size,
        progress,
        start_range,
        end_range,
        stream,
        progress_callback,
        validate_content,
        lease_id,
        if_modified_since,
        if_unmodified_since,
        if_match,
        if_none_match,
        timeout,
        operation_context,
    )

    if max_connections > 1:
        import concurrent.futures
        # NOTE(review): the executor is never shut down; map() propagates the
        # first worker exception, leaving threads to be reclaimed by GC.
        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
        # list() forces evaluation so exceptions surface here
        list(executor.map(downloader.process_chunk, downloader.get_chunk_offsets()))
    else:
        for chunk in downloader.get_chunk_offsets():
            downloader.process_chunk(chunk)


class _BlobChunkDownloader(object):
    '''
    Base chunk downloader: iterates [start_range, end_range) in chunk_size
    steps, fetches each range via the service, and hands the bytes to a
    subclass-provided _write_to_stream/_update_progress.
    '''

    def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
                 chunk_size, progress, start_range, end_range, stream,
                 progress_callback, validate_content, lease_id, if_modified_since,
                 if_unmodified_since, if_match, if_none_match, timeout, operation_context):
        # identifiers for the blob
        self.blob_service = blob_service
        self.container_name = container_name
        self.blob_name = blob_name
        self.snapshot = snapshot

        # information on the download range/chunk size
        self.chunk_size = chunk_size
        self.download_size = download_size
        self.start_index = start_range
        self.blob_end = end_range

        # the destination that we will write to
        self.stream = stream

        # progress related
        self.progress_callback = progress_callback
        self.progress_total = progress

        # parameters for each get blob operation
        self.timeout = timeout
        self.operation_context = operation_context
        self.validate_content = validate_content
        self.lease_id = lease_id
        self.if_modified_since = if_modified_since
        self.if_unmodified_since = if_unmodified_since
        self.if_match = if_match
        self.if_none_match = if_none_match

    def get_chunk_offsets(self):
        # Yields the start offset of each chunk in [start_index, blob_end).
        index = self.start_index
        while index < self.blob_end:
            yield index
            index += self.chunk_size

    def process_chunk(self, chunk_start):
        # Downloads one chunk ([chunk_start, chunk_end)) and writes it out.
        if chunk_start + self.chunk_size > self.blob_end:
            chunk_end = self.blob_end
        else:
            chunk_end = chunk_start + self.chunk_size

        chunk_data = self._download_chunk(chunk_start, chunk_end).content
        length = chunk_end - chunk_start
        if length > 0:
            self._write_to_stream(chunk_data, chunk_start)
            self._update_progress(length)

    # should be provided by the subclass
    def _update_progress(self, length):
        pass

    # should be provided by the subclass
    def _write_to_stream(self, chunk_data, chunk_start):
        pass

    def _download_chunk(self, chunk_start, chunk_end):
        # Performs the ranged Get Blob call (end_range is inclusive, hence -1).
        response = self.blob_service._get_blob(
            self.container_name,
            self.blob_name,
            snapshot=self.snapshot,
            start_range=chunk_start,
            end_range=chunk_end - 1,
            validate_content=self.validate_content,
            lease_id=self.lease_id,
            if_modified_since=self.if_modified_since,
            if_unmodified_since=self.if_unmodified_since,
            if_match=self.if_match,
            if_none_match=self.if_none_match,
            timeout=self.timeout,
            _context=self.operation_context
        )

        # This makes sure that if_match is set so that we can validate
        # that subsequent downloads are to an unmodified blob
        self.if_match = response.properties.etag
        return response


class _ParallelBlobChunkDownloader(_BlobChunkDownloader):
    '''
    Thread-safe downloader for max_connections > 1: writes out-of-order
    chunks by seeking a seekable stream, guarding the stream and the
    progress counter with locks.
    '''

    def __init__(self, blob_service, container_name, blob_name, snapshot, download_size,
                 chunk_size, progress, start_range, end_range, stream,
                 progress_callback, validate_content, lease_id, if_modified_since,
                 if_unmodified_since, if_match, if_none_match, timeout, operation_context):

        super(_ParallelBlobChunkDownloader, self).__init__(blob_service, container_name, blob_name, snapshot,
                                                           download_size,
                                                           chunk_size, progress, start_range, end_range, stream,
                                                           progress_callback, validate_content, lease_id,
                                                           if_modified_since,
                                                           if_unmodified_since, if_match, if_none_match, timeout,
                                                           operation_context)

        # for a parallel download, the stream is always seekable, so we note down the current position
        # in order to seek to the right place when out-of-order chunks come in
        self.stream_start = stream.tell()

        # since parallel operations are going on
        # it is essential to protect the writing and progress reporting operations
        self.stream_lock = threading.Lock()
        self.progress_lock = threading.Lock()

    def _update_progress(self, length):
        if self.progress_callback is not None:
            with self.progress_lock:
                self.progress_total += length
                total_so_far = self.progress_total
            # callback fires outside the lock with a snapshot of the total
            self.progress_callback(total_so_far, self.download_size)

    def _write_to_stream(self, chunk_data, chunk_start):
        with self.stream_lock:
            self.stream.seek(self.stream_start + (chunk_start - self.start_index))
            self.stream.write(chunk_data)


class _SequentialBlobChunkDownloader(_BlobChunkDownloader):
    '''
    Single-threaded downloader: chunks arrive in order, so the destination
    stream need not be seekable and no locking is required.
    '''

    def __init__(self, *args):
        # NOTE(review): this override only forwards to the base class and
        # could be deleted without changing behavior.
        super(_SequentialBlobChunkDownloader, self).__init__(*args)

    def _update_progress(self, length):
        if self.progress_callback is not None:
            self.progress_total += length
            self.progress_callback(self.progress_total, self.download_size)

    def _write_to_stream(self, chunk_data, chunk_start):
        # chunk_start is ignored in the case of sequential download since we cannot seek the destination stream
        self.stream.write(chunk_data)


# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# blob/_encryption.py
#
# Client-side blob encryption helpers: AES-256-CBC with PKCS7 padding and a
# user-supplied key-encryption-key (KEK) that wraps the per-blob content key.

from json import (
    dumps,
    loads,
)
from os import urandom

from cryptography.hazmat.primitives.padding import PKCS7

from ..common._encryption import (
    _generate_encryption_data_dict,
    _generate_AES_CBC_cipher,
    _dict_to_encryption_data,
    _validate_and_unwrap_cek,
    _EncryptionAlgorithm,
)
from ..common._error import (
    _validate_not_none,
    _validate_key_encryption_key_wrap,
    _ERROR_DATA_NOT_ENCRYPTED,
    _ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM,
)


def _encrypt_blob(blob, key_encryption_key):
    '''
    Encrypts the given blob using AES256 in CBC mode with 128 bit padding.
    Wraps the generated content-encryption-key using the user-provided
    key-encryption-key (kek). Returns a json-formatted string containing the
    encryption metadata. This method should only be used when a blob is small
    enough for single shot upload. Encrypting larger blobs is done as a part
    of the _upload_blob_chunks method.

    :param bytes blob:
        The blob to be encrypted.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
        get_kid()--returns a string key id for this key-encryption-key.
    :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data.
    :rtype: (str, bytes)
    '''

    _validate_not_none('blob', blob)
    _validate_not_none('key_encryption_key', key_encryption_key)
    _validate_key_encryption_key_wrap(key_encryption_key)

    # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks
    content_encryption_key = urandom(32)
    initialization_vector = urandom(16)

    cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)

    # PKCS7 with 16 byte blocks ensures compatibility with AES.
    padder = PKCS7(128).padder()
    padded_data = padder.update(blob) + padder.finalize()

    # Encrypt the data.
    encryptor = cipher.encryptor()
    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
                                                     initialization_vector)
    encryption_data['EncryptionMode'] = 'FullBlob'

    return dumps(encryption_data), encrypted_data


def _generate_blob_encryption_data(key_encryption_key):
    '''
    Generates the encryption_metadata for the blob.

    :param bytes key_encryption_key:
        The key-encryption-key used to wrap the cek associate with this blob.
    :return: A tuple containing the cek and iv for this blob as well as the
        serialized encryption metadata for the blob. All three are None when
        no key_encryption_key is supplied.
    :rtype: (bytes, bytes, str)
    '''
    encryption_data = None
    content_encryption_key = None
    initialization_vector = None
    if key_encryption_key:
        _validate_key_encryption_key_wrap(key_encryption_key)
        content_encryption_key = urandom(32)
        initialization_vector = urandom(16)
        encryption_data = _generate_encryption_data_dict(key_encryption_key,
                                                         content_encryption_key,
                                                         initialization_vector)
        encryption_data['EncryptionMode'] = 'FullBlob'
        encryption_data = dumps(encryption_data)

    return content_encryption_key, initialization_vector, encryption_data


def _decrypt_blob(require_encryption, key_encryption_key, key_resolver,
                  response, start_offset, end_offset):
    '''
    Decrypts the given blob contents and returns only the requested range.

    :param bool require_encryption:
        Whether or not the calling blob service requires objects to be decrypted.
    :param object key_encryption_key:
        The user-provided key-encryption-key. Must implement the following methods:
        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
        get_kid()--returns a string key id for this key-encryption-key.
    :param key_resolver:
        The user-provided key resolver. Uses the kid string to return a
        key-encryption-key implementing the interface defined above.
    :param response: the Get Blob HTTP response (body + headers).
    :param start_offset: bytes to trim from the decrypted front.
    :param end_offset: bytes to trim from the decrypted back.
    :return: The decrypted blob content.
    :rtype: bytes
    '''
    _validate_not_none('response', response)
    content = response.body
    _validate_not_none('content', content)

    try:
        encryption_data = _dict_to_encryption_data(loads(response.headers['x-ms-meta-encryptiondata']))
    # NOTE(review): bare except — any failure (missing header, bad JSON,
    # malformed metadata) is treated as "blob not encrypted".
    except:
        if require_encryption:
            raise ValueError(_ERROR_DATA_NOT_ENCRYPTED)
        else:
            return content

    if not (encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256):
        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM)

    blob_type = response.headers['x-ms-blob-type']

    iv = None
    unpad = False
    start_range, end_range = 0, len(content)
    if 'content-range' in response.headers:
        content_range = response.headers['content-range']
        # Format: 'bytes x-y/size'

        # Ignore the word 'bytes'
        content_range = content_range.split(' ')

        content_range = content_range[1].split('-')
        start_range = int(content_range[0])
        content_range = content_range[1].split('/')
        end_range = int(content_range[0])
        blob_size = int(content_range[1])

        if start_offset >= 16:
            # range was extended 16 bytes left so the previous block serves as IV
            iv = content[:16]
            content = content[16:]
            start_offset -= 16
        else:
            iv = encryption_data.content_encryption_IV

        # only unpad when the range reaches the final (padded) block
        if end_range == blob_size - 1:
            unpad = True
    else:
        # full-blob download: always unpad, IV comes from the metadata
        unpad = True
        iv = encryption_data.content_encryption_IV

    if blob_type == 'PageBlob':
        # page blobs are written in 512-byte pages and carry no PKCS7 padding
        unpad = False

    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
    cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
    decryptor = cipher.decryptor()

    content = decryptor.update(content) + decryptor.finalize()
    if unpad:
        unpadder = PKCS7(128).unpadder()
        content = unpadder.update(content) + unpadder.finalize()

    return content[start_offset: len(content) - end_offset]


def _get_blob_encryptor_and_padder(cek, iv, should_pad):
    '''
    Builds the streaming encryptor/padder pair for chunked uploads.

    :param cek: content-encryption-key, or None to disable encryption.
    :param iv: initialization vector, or None to disable encryption.
    :param bool should_pad: attach a PKCS7 padder (block blobs; page blobs
        are block-aligned and must not be padded).
    :return: (encryptor, padder), either of which may be None.
    '''
    encryptor = None
    padder = None

    if cek is not None and iv is not None:
        cipher = _generate_AES_CBC_cipher(cek, iv)
        encryptor = cipher.encryptor()
        padder = PKCS7(128).padder() if should_pad else None

    return encryptor, padder


# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# blob/_error.py -- error message constants for the blob client.

_ERROR_PAGE_BLOB_SIZE_ALIGNMENT = \
    'Invalid page blob size: {0}. ' + \
    'The size must be aligned to a 512-byte boundary.'

_ERROR_PAGE_BLOB_START_ALIGNMENT = \
    'start_range must align with 512 page size'

_ERROR_PAGE_BLOB_END_ALIGNMENT = \
    'end_range must align with 512 page size'

_ERROR_INVALID_BLOCK_ID = \
    'All blocks in block list need to have valid block ids.'

_ERROR_INVALID_LEASE_DURATION = \
    "lease_duration param needs to be between 15 and 60 or -1."

_ERROR_INVALID_LEASE_BREAK_PERIOD = \
    "lease_break_period param needs to be between 0 and 60."

_ERROR_NO_SINGLE_THREAD_CHUNKING = \
    'To use blob chunk downloader more than 1 thread must be ' + \
    'used since get_blob_to_bytes should be called for single threaded ' + \
    'blob downloads.'
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# blob/_serialization.py
#
# Request-side helpers: URL paths, Range header formatting/validation, and
# the XML body for Put Block List.
from xml.sax.saxutils import escape as xml_escape

try:
    from xml.etree import cElementTree as ETree
except ImportError:
    from xml.etree import ElementTree as ETree
from ..common._common_conversion import (
    _encode_base64,
    _str,
)
from ..common._error import (
    _validate_not_none,
    _ERROR_START_END_NEEDED_FOR_MD5,
    _ERROR_RANGE_TOO_LARGE_FOR_MD5,
)
from ._error import (
    _ERROR_PAGE_BLOB_START_ALIGNMENT,
    _ERROR_PAGE_BLOB_END_ALIGNMENT,
    _ERROR_INVALID_BLOCK_ID,
)
from io import BytesIO


def _get_path(container_name=None, blob_name=None):
    '''
    Creates the URL path to access a blob resource.

    :param str container_name: Name of container.
    :param str blob_name: The path to the blob.
    :return: '/{container}/{blob}', '/{container}', or '/' for the account
        root, depending on which arguments are supplied.
    :rtype: str
    '''
    if container_name and blob_name:
        return '/{0}/{1}'.format(
            _str(container_name),
            _str(blob_name))
    elif container_name:
        return '/{0}'.format(_str(container_name))
    else:
        return '/'


def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True,
                                       end_range_required=True, check_content_md5=False, align_to_page=False):
    '''
    Validates a byte range and writes the x-ms-range (and optionally
    x-ms-range-get-content-md5) headers onto the request in place.

    :param request: request object whose .headers dict is mutated.
    :param start_range: inclusive start byte, or None.
    :param end_range: inclusive end byte, or None for an open-ended range.
    :param bool start_range_required: require start_range to be set.
    :param bool end_range_required: require end_range to be set.
    :param bool check_content_md5: request per-range MD5; only legal for a
        complete range of at most 4MB.
    :param bool align_to_page: enforce 512-byte page alignment (page blobs).
    :raises ValueError: on misaligned or MD5-incompatible ranges.
    '''
    # If end range is provided, start range must be provided
    if start_range_required or end_range is not None:
        _validate_not_none('start_range', start_range)
    if end_range_required:
        _validate_not_none('end_range', end_range)

    # Page ranges must be 512 aligned; end is inclusive, hence % 512 == 511
    if align_to_page:
        if start_range is not None and start_range % 512 != 0:
            raise ValueError(_ERROR_PAGE_BLOB_START_ALIGNMENT)
        if end_range is not None and end_range % 512 != 511:
            raise ValueError(_ERROR_PAGE_BLOB_END_ALIGNMENT)

    # Format based on whether end_range is present
    request.headers = request.headers or {}
    if end_range is not None:
        request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range)
    elif start_range is not None:
        request.headers['x-ms-range'] = "bytes={0}-".format(start_range)

    # Content MD5 can only be provided for a complete range less than 4MB in size
    if check_content_md5:
        if start_range is None or end_range is None:
            raise ValueError(_ERROR_START_END_NEEDED_FOR_MD5)
        if end_range - start_range > 4 * 1024 * 1024:
            raise ValueError(_ERROR_RANGE_TOO_LARGE_FOR_MD5)

        request.headers['x-ms-range-get-content-md5'] = 'true'


def _convert_block_list_to_xml(block_id_list):
    '''
    Serializes a block list into the XML body expected by Put Block List:

        <BlockList>
          <Committed>base64-block-id</Committed>
          <Uncommitted>base64-block-id</Uncommitted>
          <Latest>base64-block-id</Latest>
        </BlockList>

    Each entry's element name is the block's state.

    :param block_id_list: list of BlobBlock carrying .id and .state, or None.
    :return: UTF-8 encoded XML bytes (with declaration), or '' when the
        input is None.
    :raises ValueError: if any block has a None id.
    '''
    if block_id_list is None:
        return ''

    block_list_element = ETree.Element('BlockList')

    for block in block_id_list:
        if block.id is None:
            raise ValueError(_ERROR_INVALID_BLOCK_ID)
        # _encode_base64 already yields a str, so the original's extra
        # format() wrapper was a no-op and is dropped; xml_escape guards
        # against markup characters in the encoded id.
        encoded_id = xml_escape(_str(_encode_base64(block.id)))
        ETree.SubElement(block_list_element, block.state).text = encoded_id

    # Serialize with an XML declaration. The original wrapped this in a
    # try/except that only re-raised, and its finally block would have hit a
    # NameError on 'stream' had BytesIO() itself failed; a plain try/finally
    # around the write is equivalent and safe.
    stream = BytesIO()
    try:
        ETree.ElementTree(block_list_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml')
        return stream.getvalue()
    finally:
        stream.close()
+# -------------------------------------------------------------------------- +from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation) +from threading import Lock + +from math import ceil + +from ..common._common_conversion import _encode_base64 +from ..common._error import _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM +from ..common._serialization import ( + url_quote, + _get_data_bytes_only, + _len_plus +) +from ._constants import ( + _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE +) +from ._encryption import ( + _get_blob_encryptor_and_padder, +) +from .models import BlobBlock + + +def _upload_blob_chunks(blob_service, container_name, blob_name, + blob_size, block_size, stream, max_connections, + progress_callback, validate_content, lease_id, uploader_class, + maxsize_condition=None, if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None, + content_encryption_key=None, initialization_vector=None, resource_properties=None): + encryptor, padder = _get_blob_encryptor_and_padder(content_encryption_key, initialization_vector, + uploader_class is not _PageBlobChunkUploader) + + uploader = uploader_class( + blob_service, + container_name, + blob_name, + blob_size, + block_size, + stream, + max_connections > 1, + progress_callback, + validate_content, + lease_id, + timeout, + encryptor, + padder + ) + + uploader.maxsize_condition = maxsize_condition + + # Access conditions do not work with parallelism + if max_connections > 1: + uploader.if_match = uploader.if_none_match = uploader.if_modified_since = uploader.if_unmodified_since = None + else: + uploader.if_match = if_match + uploader.if_none_match = if_none_match + uploader.if_modified_since = if_modified_since + uploader.if_unmodified_since = if_unmodified_since + + if progress_callback is not None: + progress_callback(0, blob_size) + + if max_connections > 1: + import concurrent.futures + from threading import BoundedSemaphore + + ''' + Ensures we bound the 
chunking so we only buffer and submit 'max_connections' amount of work items to the executor. + This is necessary as the executor queue will keep accepting submitted work items, which results in buffering all the blocks if + the max_connections + 1 ensures the next chunk is already buffered and ready for when the worker thread is available. + ''' + chunk_throttler = BoundedSemaphore(max_connections + 1) + + executor = concurrent.futures.ThreadPoolExecutor(max_connections) + futures = [] + running_futures = [] + + # Check for exceptions and fail fast. + for chunk in uploader.get_chunk_streams(): + for f in running_futures: + if f.done(): + if f.exception(): + raise f.exception() + else: + running_futures.remove(f) + + chunk_throttler.acquire() + future = executor.submit(uploader.process_chunk, chunk) + + # Calls callback upon completion (even if the callback was added after the Future task is done). + future.add_done_callback(lambda x: chunk_throttler.release()) + futures.append(future) + running_futures.append(future) + + # result() will wait until completion and also raise any exceptions that may have been set. 
+        range_ids = [f.result() for f in futures]
+    else:
+        # Serial path: upload chunks one at a time on the calling thread.
+        range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
+
+    # Propagate the service-reported properties of the final request to the caller, if asked.
+    if resource_properties:
+        resource_properties.last_modified = uploader.last_modified
+        resource_properties.etag = uploader.etag
+
+    return range_ids
+
+
+def _upload_blob_substream_blocks(blob_service, container_name, blob_name,
+                                  blob_size, block_size, stream, max_connections,
+                                  progress_callback, validate_content, lease_id, uploader_class,
+                                  maxsize_condition=None, if_match=None, timeout=None):
+    # Like _upload_blob_chunks, but the stream is sliced into _SubStream views
+    # (no buffering of chunk bytes up front) and no encryption is performed.
+    uploader = uploader_class(
+        blob_service,
+        container_name,
+        blob_name,
+        blob_size,
+        block_size,
+        stream,
+        max_connections > 1,
+        progress_callback,
+        validate_content,
+        lease_id,
+        timeout,
+        None,
+        None
+    )
+
+    uploader.maxsize_condition = maxsize_condition
+
+    # ETag matching does not work with parallelism as a ranged upload may start
+    # before the previous finishes and provides an etag
+    uploader.if_match = if_match if not max_connections > 1 else None
+
+    if progress_callback is not None:
+        progress_callback(0, blob_size)
+
+    if max_connections > 1:
+        import concurrent.futures
+        executor = concurrent.futures.ThreadPoolExecutor(max_connections)
+        range_ids = list(executor.map(uploader.process_substream_block, uploader.get_substream_blocks()))
+    else:
+        range_ids = [uploader.process_substream_block(result) for result in uploader.get_substream_blocks()]
+
+    return range_ids
+
+
+class _BlobChunkUploader(object):
+    # Base class holding shared state for chunked uploads; subclasses implement
+    # _upload_chunk / _upload_substream_block for their blob type.
+    def __init__(self, blob_service, container_name, blob_name, blob_size,
+                 chunk_size, stream, parallel, progress_callback,
+                 validate_content, lease_id, timeout, encryptor, padder):
+        self.blob_service = blob_service
+        self.container_name = container_name
+        self.blob_name = blob_name
+        self.blob_size = blob_size
+        self.chunk_size = chunk_size
+        self.stream = stream
+        self.parallel = parallel
+        # Locks and the stream start position are only needed when workers share the stream.
+        self.stream_start = stream.tell() if parallel else None
+        self.stream_lock = Lock() if parallel else None
+        self.progress_callback = progress_callback
+        self.progress_total = 0
+        self.progress_lock = Lock() if parallel else None
+        self.validate_content = validate_content
+        self.lease_id = lease_id
+        self.timeout = timeout
+        self.encryptor = encryptor
+        self.padder = padder
+        self.last_modified = None
+        self.etag = None
+
+    def get_chunk_streams(self):
+        # Generator yielding (offset, bytes) pairs of at most chunk_size bytes,
+        # applying padding/encryption when configured. Runs on the main thread.
+        index = 0
+        while True:
+            data = b''
+            read_size = self.chunk_size
+
+            # Buffer until we either reach the end of the stream or get a whole chunk.
+            while True:
+                if self.blob_size:
+                    read_size = min(self.chunk_size - len(data), self.blob_size - (index + len(data)))
+                temp = self.stream.read(read_size)
+                temp = _get_data_bytes_only('temp', temp)
+                data += temp
+
+                # We have read an empty string and so are at the end
+                # of the buffer or we have read a full chunk.
+                if temp == b'' or len(data) == self.chunk_size:
+                    break
+
+            if len(data) == self.chunk_size:
+                if self.padder:
+                    data = self.padder.update(data)
+                if self.encryptor:
+                    data = self.encryptor.update(data)
+                yield index, data
+            else:
+                # Final (short) chunk: flush padder/encryptor state as well.
+                if self.padder:
+                    data = self.padder.update(data) + self.padder.finalize()
+                if self.encryptor:
+                    data = self.encryptor.update(data) + self.encryptor.finalize()
+                if len(data) > 0:
+                    yield index, data
+                break
+            index += len(data)
+
+    def process_chunk(self, chunk_data):
+        # chunk_data is an (offset, bytes) pair produced by get_chunk_streams().
+        chunk_bytes = chunk_data[1]
+        chunk_offset = chunk_data[0]
+        return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
+
+    def _update_progress(self, length):
+        # Accumulate uploaded byte count (under a lock when parallel) and notify the callback.
+        if self.progress_callback is not None:
+            if self.progress_lock is not None:
+                with self.progress_lock:
+                    self.progress_total += length
+                    total = self.progress_total
+            else:
+                self.progress_total += length
+                total = self.progress_total
+            self.progress_callback(total, self.blob_size)
+
+    def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
+        range_id = self._upload_chunk(chunk_offset, chunk_data)
+        self._update_progress(len(chunk_data))
+        return range_id
+
+    # Yields (block id, _SubStream) pairs covering the whole stream without buffering it.
+    def get_substream_blocks(self):
+        assert self.chunk_size is not None
+        lock = self.stream_lock
+        blob_length = self.blob_size
+
+        if blob_length is None:
+            # Fall back to measuring the stream; only seekable streams can be measured.
+            blob_length = _len_plus(self.stream)
+            if blob_length is None:
+                raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('stream'))
+
+        # Number of blocks, rounding up so a trailing partial block is included.
+        blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
+        last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
+
+        for i in range(blocks):
+            yield ('BlockId{}'.format("%05d" % i),
+                   _SubStream(self.stream, i * self.chunk_size, last_block_size if i == blocks - 1 else self.chunk_size,
+                              lock))
+
+    def process_substream_block(self, block_data):
+        # block_data is a (block id, _SubStream) pair from get_substream_blocks().
+        return self._upload_substream_block_with_progress(block_data[0], block_data[1])
+
+    def _upload_substream_block_with_progress(self, block_id, block_stream):
+        range_id = self._upload_substream_block(block_id, block_stream)
+        self._update_progress(len(block_stream))
+        return range_id
+
+    def set_response_properties(self, resp):
+        # Record the service-reported etag/last-modified of the most recent request.
+        self.etag = resp.etag
+        self.last_modified = resp.last_modified
+
+
+class _BlockBlobChunkUploader(_BlobChunkUploader):
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # Block ids must be uniform length; derive one from the zero-padded chunk offset.
+        block_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
+        self.blob_service._put_block(
+            self.container_name,
+            self.blob_name,
+            chunk_data,
+            block_id,
+            validate_content=self.validate_content,
+            lease_id=self.lease_id,
+            timeout=self.timeout,
+        )
+        return BlobBlock(block_id)
+
+    def _upload_substream_block(self, block_id, block_stream):
+        # Always close the sub-stream, even when the put fails.
+        try:
+            self.blob_service._put_block(
+                self.container_name,
+                self.blob_name,
+                block_stream,
+                block_id,
+                validate_content=self.validate_content,
+                lease_id=self.lease_id,
+                timeout=self.timeout,
+            )
+        finally:
+            block_stream.close()
+        return BlobBlock(block_id)
+
+
+class _PageBlobChunkUploader(_BlobChunkUploader):
+    def _is_chunk_empty(self, chunk_data):
+        # read until non-zero byte is encountered
+        # if reached the end without returning, then chunk_data is all 0's
+        # NOTE: iterating bytes yields ints on Python 3 and 1-char strings on
+        # Python 2, hence the two comparisons.
+        for each_byte in chunk_data:
+            if each_byte != 0 and each_byte != b'\x00':
+                return False
+        return True
+
+    def _upload_chunk(self, chunk_start, chunk_data):
+        # avoid uploading the empty pages
+        if not self._is_chunk_empty(chunk_data):
+            chunk_end = chunk_start + len(chunk_data) - 1
+            resp = self.blob_service._update_page(
+                self.container_name,
+                self.blob_name,
+                chunk_data,
+                chunk_start,
+                chunk_end,
+                validate_content=self.validate_content,
+                lease_id=self.lease_id,
+                if_match=self.if_match,
+                timeout=self.timeout,
+            )
+
+            # In serial mode, chain each request's etag into the next one's if-match.
+            if not self.parallel:
+                self.if_match = resp.etag
+
+            self.set_response_properties(resp)
+
+
+class _AppendBlobChunkUploader(_BlobChunkUploader):
+    def _upload_chunk(self, chunk_offset, chunk_data):
+        # First append: honor the caller's access conditions and learn the append offset.
+        if not hasattr(self, 'current_length'):
+            resp = self.blob_service.append_block(
+                self.container_name,
+                self.blob_name,
+                chunk_data,
+                validate_content=self.validate_content,
+                lease_id=self.lease_id,
+                maxsize_condition=self.maxsize_condition,
+                timeout=self.timeout,
+                if_modified_since=self.if_modified_since,
+                if_unmodified_since=self.if_unmodified_since,
+                if_match=self.if_match,
+                if_none_match=self.if_none_match
+            )
+
+            self.current_length = resp.append_offset
+        else:
+            # Subsequent appends: pin the expected append position instead.
+            resp = self.blob_service.append_block(
+                self.container_name,
+                self.blob_name,
+                chunk_data,
+                validate_content=self.validate_content,
+                lease_id=self.lease_id,
+                maxsize_condition=self.maxsize_condition,
+                appendpos_condition=self.current_length + chunk_offset,
+                timeout=self.timeout,
+            )
+
+        self.set_response_properties(resp)
+
+
+class _SubStream(IOBase):
+    # Read-only, seekable file-like view over a [begin, begin + length) slice of
+    # a shared wrapped stream, with a small internal read buffer.
+    def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
+        # Python 2.7: file-like objects created with open() typically support seek(), but are not
+        # derivations of io.IOBase and thus do not implement seekable().
+        # Python > 3.0: file-like objects created with open() are derived from io.IOBase.
+ try: + # only the main thread runs this, so there's no need grabbing the lock + wrapped_stream.seek(0, SEEK_CUR) + except: + raise ValueError("Wrapped stream must support seek().") + + self._lock = lockObj + self._wrapped_stream = wrapped_stream + self._position = 0 + self._stream_begin_index = stream_begin_index + self._length = length + self._buffer = BytesIO() + + # we must avoid buffering more than necessary, and also not use up too much memory + # so the max buffer size is capped at 4MB + self._max_buffer_size = length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE \ + else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE + self._current_buffer_start = 0 + self._current_buffer_size = 0 + + def __len__(self): + return self._length + + def close(self): + if self._buffer: + self._buffer.close() + self._wrapped_stream = None + IOBase.close(self) + + def fileno(self): + return self._wrapped_stream.fileno() + + def flush(self): + pass + + def read(self, n): + if self.closed: + raise ValueError("Stream is closed.") + + # adjust if out of bounds + if n + self._position >= self._length: + n = self._length - self._position + + # return fast + if n is 0 or self._buffer.closed: + return b'' + + # attempt first read from the read buffer and update position + read_buffer = self._buffer.read(n) + bytes_read = len(read_buffer) + bytes_remaining = n - bytes_read + self._position += bytes_read + + # repopulate the read buffer from the underlying stream to fulfill the request + # ensure the seek and read operations are done atomically (only if a lock is provided) + if bytes_remaining > 0: + with self._buffer: + # either read in the max buffer size specified on the class + # or read in just enough data for the current block/sub stream + current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) + + # lock is only defined if max_connections > 1 (parallel uploads) + if self._lock: + with self._lock: + # reposition the underlying stream to match the start of 
the data to read + absolute_position = self._stream_begin_index + self._position + self._wrapped_stream.seek(absolute_position, SEEK_SET) + # If we can't seek to the right location, our read will be corrupted so fail fast. + if self._wrapped_stream.tell() != absolute_position: + raise IOError("Stream failed to seek to the desired location.") + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + else: + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + + if buffer_from_stream: + # update the buffer with new data from the wrapped stream + # we need to note down the start position and size of the buffer, in case seek is performed later + self._buffer = BytesIO(buffer_from_stream) + self._current_buffer_start = self._position + self._current_buffer_size = len(buffer_from_stream) + + # read the remaining bytes from the new buffer and update position + second_read_buffer = self._buffer.read(bytes_remaining) + read_buffer += second_read_buffer + self._position += len(second_read_buffer) + + return read_buffer + + def readable(self): + return True + + def readinto(self, b): + raise UnsupportedOperation + + def seek(self, offset, whence=0): + if whence is SEEK_SET: + start_index = 0 + elif whence is SEEK_CUR: + start_index = self._position + elif whence is SEEK_END: + start_index = self._length + offset = - offset + else: + raise ValueError("Invalid argument for the 'whence' parameter.") + + pos = start_index + offset + + if pos > self._length: + pos = self._length + elif pos < 0: + pos = 0 + + # check if buffer is still valid + # if not, drop buffer + if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: + self._buffer.close() + self._buffer = BytesIO() + else: # if yes seek to correct position + delta = pos - self._current_buffer_start + self._buffer.seek(delta, SEEK_SET) + + self._position = pos + return pos + + def seekable(self): + return True + + def tell(self): + return 
self._position + + def write(self): + raise UnsupportedOperation + + def writelines(self): + raise UnsupportedOperation + + def writeable(self): + return False diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py new file mode 100644 index 000000000000..8369cb3727e9 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py @@ -0,0 +1,661 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import sys +from os import path + +from ..common._common_conversion import ( + _to_str, + _int_to_str, + _datetime_to_utc_string, + _get_content_md5, +) +from ..common._constants import ( + SERVICE_HOST_BASE, + DEFAULT_PROTOCOL, +) +from ..common._error import ( + _validate_not_none, + _validate_type_bytes, + _validate_encryption_unsupported, + _ERROR_VALUE_NEGATIVE, +) +from ..common._http import HTTPRequest +from ..common._serialization import ( + _get_data_bytes_only, + _add_metadata_headers, +) +from ._deserialization import ( + _parse_append_block, + _parse_base_properties, +) +from ._serialization import ( + _get_path, +) +from ._upload_chunking import ( + _AppendBlobChunkUploader, + _upload_blob_chunks, +) +from .baseblobservice import BaseBlobService +from .models import ( + _BlobTypes, + ResourceProperties +) + +if sys.version_info >= (3,): + from io import BytesIO +else: + from cStringIO import StringIO as BytesIO + + +class AppendBlobService(BaseBlobService): + ''' + An append blob is comprised of blocks and is optimized for append operations. 
+ When you modify an append blob, blocks are added to the end of the blob only, + via the append_block operation. Updating or deleting of existing blocks is not + supported. Unlike a block blob, an append blob does not expose its block IDs. + + Each block in an append blob can be a different size, up to a maximum of 4 MB, + and an append blob can include up to 50,000 blocks. The maximum size of an + append blob is therefore slightly more than 195 GB (4 MB X 50,000 blocks). + + :ivar int MAX_BLOCK_SIZE: + The size of the blocks put by append_blob_from_* methods. Smaller blocks + may be put if there is less data provided. The maximum block size the service + supports is 4MB. + ''' + MAX_BLOCK_SIZE = 4 * 1024 * 1024 + + def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, + protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None, + connection_string=None, socket_timeout=None, token_credential=None): + ''' + :param str account_name: + The storage account name. This is used to authenticate requests + signed with an account key and to construct the storage endpoint. It + is required unless a connection string is given, or if a custom + domain is used with anonymous authentication. + :param str account_key: + The storage account key. This is used for shared key authentication. + If neither account key or sas token is specified, anonymous access + will be used. + :param str sas_token: + A shared access signature token to use to authenticate requests + instead of the account key. If account key and sas token are both + specified, account key will be used to sign. If neither are + specified, anonymous access will be used. + :param bool is_emulated: + Whether to use the emulator. Defaults to False. If specified, will + override all other parameters besides connection string and request + session. + :param str protocol: + The protocol to use for requests. Defaults to https. 
+ :param str endpoint_suffix: + The host base component of the url, minus the account name. Defaults + to Azure (core.windows.net). Override this to use the China cloud + (core.chinacloudapi.cn). + :param str custom_domain: + The custom domain to use. This can be set in the Azure Portal. For + example, 'www.mydomain.com'. + :param requests.Session request_session: + The session object to use for http requests. + :param str connection_string: + If specified, this will override all other parameters besides + request session. See + http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ + for the connection string format. + :param int socket_timeout: + If specified, this will override the default socket timeout. The timeout specified is in seconds. + See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. + :param token_credential: + A token credential used to authenticate HTTPS requests. The token value + should be updated before its expiration. + :type `~..common.TokenCredential` + ''' + self.blob_type = _BlobTypes.AppendBlob + super(AppendBlobService, self).__init__( + account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, + custom_domain, request_session, connection_string, socket_timeout, token_credential) + + def create_blob(self, container_name, blob_name, content_settings=None, + metadata=None, lease_id=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + ''' + Creates a blob or overrides an existing blob. Use if_none_match=* to + prevent overriding an existing blob. + + See create_blob_from_* for high level + functions that handle the creation and upload of large blobs with + automatic chunking and progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. 
+ :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to + perform the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the updated Append Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = {'timeout': _int_to_str(timeout)} + request.headers = { + 'x-ms-blob-type': _to_str(self.blob_type), + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + _add_metadata_headers(metadata, request) + if content_settings is not None: + request.headers.update(content_settings._to_headers()) + + return self._perform_request(request, _parse_base_properties) + + def append_block(self, container_name, blob_name, block, + validate_content=False, maxsize_condition=None, + appendpos_condition=None, + lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Commits a new block of data to the end of an existing append blob. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param bytes block: + Content of the block in bytes. + :param bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + blob. 
+ :param int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :param int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. 
Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: + ETag, last modified, append offset, and committed block count + properties for the updated Append Blob + :rtype: :class:`~azure.storage.blob.models.AppendBlockProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('block', block) + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'appendblock', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-blob-condition-maxsize': _to_str(maxsize_condition), + 'x-ms-blob-condition-appendpos': _to_str(appendpos_condition), + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + request.body = _get_data_bytes_only('block', block) + + if validate_content: + computed_md5 = _get_content_md5(request.body) + request.headers['Content-MD5'] = _to_str(computed_md5) + + return self._perform_request(request, _parse_append_block) + + # ----Convenience APIs---------------------------------------------- + + def append_blob_from_path( + self, container_name, blob_name, file_path, validate_content=False, + maxsize_condition=None, progress_callback=None, lease_id=None, timeout=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None): + ''' + Appends to the content of an existing blob from a file path, with automatic + chunking and progress notifications. 
+ + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param str file_path: + Path of the file to upload as the blob content. + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param str lease_id: + Required if the blob has an active lease. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :return: ETag and last modified properties for the Append Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('file_path', file_path) + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + count = path.getsize(file_path) + with open(file_path, 'rb') as stream: + return self.append_blob_from_stream( + container_name, + blob_name, + stream, + count=count, + validate_content=validate_content, + maxsize_condition=maxsize_condition, + progress_callback=progress_callback, + lease_id=lease_id, + timeout=timeout, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match) + + def append_blob_from_bytes( + self, container_name, blob_name, blob, index=0, count=None, + validate_content=False, maxsize_condition=None, progress_callback=None, + lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None): + ''' + Appends to the content of an existing blob from an array of bytes, with + automatic chunking and 
progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param bytes blob: + Content of blob as an array of bytes. + :param int index: + Start index in the array of bytes. + :param int count: + Number of bytes to upload. Set to None or negative value to upload + all bytes starting from index. + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param str lease_id: + Required if the blob has an active lease. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :return: ETag and last modified properties for the Append Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('blob', blob) + _validate_not_none('index', index) + _validate_type_bytes('blob', blob) + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + if index < 0: + raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) + + if count is None or count < 0: + count = len(blob) - index + + stream = BytesIO(blob) + stream.seek(index) + + return self.append_blob_from_stream( + container_name, + blob_name, + stream, + count=count, + validate_content=validate_content, + maxsize_condition=maxsize_condition, + lease_id=lease_id, + progress_callback=progress_callback, + timeout=timeout, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + 
if_none_match=if_none_match) + + def append_blob_from_text( + self, container_name, blob_name, text, encoding='utf-8', + validate_content=False, maxsize_condition=None, progress_callback=None, + lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None): + ''' + Appends to the content of an existing blob from str/unicode, with + automatic chunking and progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param str text: + Text to upload to the blob. + :param str encoding: + Python encoding to use to convert the text to bytes. + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param str lease_id: + Required if the blob has an active lease. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. 
+ :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. 
+ :return: ETag and last modified properties for the Append Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('text', text) + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + if not isinstance(text, bytes): + _validate_not_none('encoding', encoding) + text = text.encode(encoding) + + return self.append_blob_from_bytes( + container_name, + blob_name, + text, + index=0, + count=len(text), + validate_content=validate_content, + maxsize_condition=maxsize_condition, + lease_id=lease_id, + progress_callback=progress_callback, + timeout=timeout, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match) + + def append_blob_from_stream( + self, container_name, blob_name, stream, count=None, + validate_content=False, maxsize_condition=None, progress_callback=None, + lease_id=None, timeout=None, if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None): + ''' + Appends to the content of an existing blob from a file/stream, with + automatic chunking and progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param io.IOBase stream: + Opened stream to upload as the blob content. + :param int count: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. 
+ :param int maxsize_condition: + Conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param str lease_id: + Required if the blob has an active lease. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetime will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). 
Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :return: ETag and last modified properties for the Append Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('stream', stream) + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + # _upload_blob_chunks returns the block ids for block blobs so resource_properties + # is passed as a parameter to get the last_modified and etag for page and append blobs. + # this info is not needed for block_blobs since _put_block_list is called after which gets this info + resource_properties = ResourceProperties() + _upload_blob_chunks( + blob_service=self, + container_name=container_name, + blob_name=blob_name, + blob_size=count, + block_size=self.MAX_BLOCK_SIZE, + stream=stream, + max_connections=1, # upload not easily parallelizable + progress_callback=progress_callback, + validate_content=validate_content, + lease_id=lease_id, + uploader_class=_AppendBlobChunkUploader, + maxsize_condition=maxsize_condition, + timeout=timeout, + resource_properties=resource_properties, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match + ) + + return resource_properties diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py new file mode 100644 index 000000000000..adb9127ca5f5 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py @@ -0,0 +1,3280 @@ +# 
------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import sys +from abc import ABCMeta + +from azure.common import AzureHttpError + +from ..common._auth import ( + _StorageSASAuthentication, + _StorageSharedKeyAuthentication, + _StorageNoAuthentication, +) +from ..common._common_conversion import ( + _int_to_str, + _to_str, + _datetime_to_utc_string, +) +from ..common._connection import _ServiceParameters +from ..common._constants import ( + SERVICE_HOST_BASE, + DEFAULT_PROTOCOL, +) +from ..common._deserialization import ( + _convert_xml_to_service_properties, + _parse_metadata, + _parse_properties, + _convert_xml_to_service_stats, + _parse_length_from_content_range, +) +from ..common._error import ( + _dont_fail_not_exist, + _dont_fail_on_exist, + _validate_not_none, + _validate_decryption_required, + _validate_access_policies, + _ERROR_PARALLEL_NOT_SEEKABLE, +) +from ..common._http import HTTPRequest +from ..common._serialization import ( + _get_request_body, + _convert_signed_identifiers_to_xml, + _convert_service_properties_to_xml, + _add_metadata_headers, +) +from ..common.models import ( + Services, + ListGenerator, + _OperationContext, +) + +from .sharedaccesssignature import ( + BlobSharedAccessSignature, +) +from ..common.storageclient import StorageClient +from ._deserialization import ( + _convert_xml_to_containers, + _parse_blob, + _convert_xml_to_blob_list, + _parse_container, + _parse_snapshot_blob, + _parse_lease, + _convert_xml_to_signed_identifiers_and_access, + _parse_base_properties, + _parse_account_information, +) +from ._download_chunking import _download_blob_chunks +from ._error import ( + _ERROR_INVALID_LEASE_DURATION, + _ERROR_INVALID_LEASE_BREAK_PERIOD, +) +from ._serialization 
import ( + _get_path, + _validate_and_format_range_headers, +) +from .models import ( + BlobProperties, + _LeaseActions, + ContainerPermissions, + BlobPermissions, +) + +from ._constants import ( + X_MS_VERSION, + __version__ as package_version, +) + +_CONTAINER_ALREADY_EXISTS_ERROR_CODE = 'ContainerAlreadyExists' +_BLOB_NOT_FOUND_ERROR_CODE = 'BlobNotFound' +_CONTAINER_NOT_FOUND_ERROR_CODE = 'ContainerNotFound' + +if sys.version_info >= (3,): + from io import BytesIO +else: + from cStringIO import StringIO as BytesIO + + +class BaseBlobService(StorageClient): + ''' + This is the main class managing Blob resources. + + The Blob service stores text and binary data as blobs in the cloud. + The Blob service offers the following three resources: the storage account, + containers, and blobs. Within your storage account, containers provide a + way to organize sets of blobs. For more information please see: + https://msdn.microsoft.com/en-us/library/azure/ee691964.aspx + + :ivar int MAX_SINGLE_GET_SIZE: + The size of the first range get performed by get_blob_to_* methods if + max_connections is greater than 1. Less data will be returned if the + blob is smaller than this. + :ivar int MAX_CHUNK_GET_SIZE: + The size of subsequent range gets performed by get_blob_to_* methods if + max_connections is greater than 1 and the blob is larger than MAX_SINGLE_GET_SIZE. + Less data will be returned if the remainder of the blob is smaller than + this. If this is set to larger than 4MB, content_validation will throw an + error if enabled. However, if content_validation is not desired a size + greater than 4MB may be optimal. Setting this below 4MB is not recommended. + :ivar object key_encryption_key: + The key-encryption-key optionally provided by the user. If provided, will be used to + encrypt/decrypt in supported methods. + For methods requiring decryption, either the key_encryption_key OR the resolver must be provided. + If both are provided, the resolver will take precedence. 
+ Must implement the following methods for APIs requiring encryption: + wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes. + get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. + get_kid()--returns a string key id for this key-encryption-key. + Must implement the following methods for APIs requiring decryption: + unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm. + get_kid()--returns a string key id for this key-encryption-key. + :ivar function key_resolver_function(kid): + A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods. + For methods requiring decryption, either the key_encryption_key OR + the resolver must be provided. If both are provided, the resolver will take precedence. + It uses the kid string to return a key-encryption-key implementing the interface defined above. + :ivar bool require_encryption: + A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and + successfully read from the queue are/were encrypted while on the server. If this flag is set, all required + parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver. + ''' + + __metaclass__ = ABCMeta + MAX_SINGLE_GET_SIZE = 32 * 1024 * 1024 + MAX_CHUNK_GET_SIZE = 4 * 1024 * 1024 + + def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, + protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, request_session=None, + connection_string=None, socket_timeout=None, token_credential=None): + ''' + :param str account_name: + The storage account name. This is used to authenticate requests + signed with an account key and to construct the storage endpoint. 
It + is required unless a connection string is given, or if a custom + domain is used with anonymous authentication. + :param str account_key: + The storage account key. This is used for shared key authentication. + If neither account key or sas token is specified, anonymous access + will be used. + :param str sas_token: + A shared access signature token to use to authenticate requests + instead of the account key. If account key and sas token are both + specified, account key will be used to sign. If neither are + specified, anonymous access will be used. + :param bool is_emulated: + Whether to use the emulator. Defaults to False. If specified, will + override all other parameters besides connection string and request + session. + :param str protocol: + The protocol to use for requests. Defaults to https. + :param str endpoint_suffix: + The host base component of the url, minus the account name. Defaults + to Azure (core.windows.net). Override this to use the China cloud + (core.chinacloudapi.cn). + :param str custom_domain: + The custom domain to use. This can be set in the Azure Portal. For + example, 'www.mydomain.com'. + :param requests.Session request_session: + The session object to use for http requests. + :param str connection_string: + If specified, this will override all other parameters besides + request session. See + http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ + for the connection string format + :param int socket_timeout: + If specified, this will override the default socket timeout. The timeout specified is in seconds. + See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. + :param token_credential: + A token credential used to authenticate HTTPS requests. The token value + should be updated before its expiration. 
+ :type `~..common.TokenCredential` + ''' + service_params = _ServiceParameters.get_service_parameters( + 'blob', + account_name=account_name, + account_key=account_key, + sas_token=sas_token, + token_credential=token_credential, + is_emulated=is_emulated, + protocol=protocol, + endpoint_suffix=endpoint_suffix, + custom_domain=custom_domain, + request_session=request_session, + connection_string=connection_string, + socket_timeout=socket_timeout) + + super(BaseBlobService, self).__init__(service_params) + + if self.account_key: + self.authentication = _StorageSharedKeyAuthentication( + self.account_name, + self.account_key, + self.is_emulated + ) + elif self.sas_token: + self.authentication = _StorageSASAuthentication(self.sas_token) + elif self.token_credential: + self.authentication = self.token_credential + else: + self.authentication = _StorageNoAuthentication() + + self.require_encryption = False + self.key_encryption_key = None + self.key_resolver_function = None + self._X_MS_VERSION = X_MS_VERSION + self._update_user_agent_string(package_version) + + def make_blob_url(self, container_name, blob_name, protocol=None, sas_token=None, snapshot=None): + ''' + Creates the url to access a blob. + + :param str container_name: + Name of container. + :param str blob_name: + Name of blob. + :param str protocol: + Protocol to use: 'http' or 'https'. If not specified, uses the + protocol specified when BaseBlobService was initialized. + :param str sas_token: + Shared access signature token created with + generate_shared_access_signature. + :param str snapshot: + An string value that uniquely identifies the snapshot. The value of + this query parameter indicates the snapshot version. + :return: blob access URL. 
+ :rtype: str + ''' + + url = '{}://{}/{}/{}'.format( + protocol or self.protocol, + self.primary_endpoint, + container_name, + blob_name, + ) + + if snapshot and sas_token: + url = '{}?snapshot={}&{}'.format(url, snapshot, sas_token) + elif snapshot: + url = '{}?snapshot={}'.format(url, snapshot) + elif sas_token: + url = '{}?{}'.format(url, sas_token) + + return url + + def make_container_url(self, container_name, protocol=None, sas_token=None): + ''' + Creates the url to access a container. + + :param str container_name: + Name of container. + :param str protocol: + Protocol to use: 'http' or 'https'. If not specified, uses the + protocol specified when BaseBlobService was initialized. + :param str sas_token: + Shared access signature token created with + generate_shared_access_signature. + :return: container access URL. + :rtype: str + ''' + + url = '{}://{}/{}?restype=container'.format( + protocol or self.protocol, + self.primary_endpoint, + container_name, + ) + + if sas_token: + url = '{}&{}'.format(url, sas_token) + + return url + + def generate_account_shared_access_signature(self, resource_types, permission, + expiry, start=None, ip=None, protocol=None): + ''' + Generates a shared access signature for the blob service. + Use the returned signature with the sas_token parameter of any BlobService. + + :param ResourceTypes resource_types: + Specifies the resource types that are accessible with the account SAS. + :param AccountPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. 
This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~..common.models.Protocol` for possible values. + :return: A Shared Access Signature (sas) token. + :rtype: str + ''' + _validate_not_none('self.account_name', self.account_name) + _validate_not_none('self.account_key', self.account_key) + + sas = BlobSharedAccessSignature(self.account_name, self.account_key) + return sas.generate_account(Services.BLOB, resource_types, permission, + expiry, start=start, ip=ip, protocol=protocol) + + def generate_container_shared_access_signature(self, container_name, + permission=None, expiry=None, + start=None, id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None): + ''' + Generates a shared access signature for the container. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. 
+ :param ContainerPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~..common.models.Protocol` for possible values. 
+ :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :return: A Shared Access Signature (sas) token. + :rtype: str + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('self.account_name', self.account_name) + _validate_not_none('self.account_key', self.account_key) + + sas = BlobSharedAccessSignature(self.account_name, self.account_key) + return sas.generate_container( + container_name, + permission, + expiry, + start=start, + id=id, + ip=ip, + protocol=protocol, + cache_control=cache_control, + content_disposition=content_disposition, + content_encoding=content_encoding, + content_language=content_language, + content_type=content_type, + ) + + def generate_blob_shared_access_signature( + self, container_name, blob_name, permission=None, + expiry=None, start=None, id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None): + ''' + Generates a shared access signature for the blob. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param str blob_name: + Name of blob. + :param BlobPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. 
+ Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use :func:`~set_container_acl`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~..common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. 
+ :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :return: A Shared Access Signature (sas) token. + :rtype: str + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('self.account_name', self.account_name) + _validate_not_none('self.account_key', self.account_key) + + sas = BlobSharedAccessSignature(self.account_name, self.account_key) + return sas.generate_blob( + container_name, + blob_name, + permission, + expiry, + start=start, + id=id, + ip=ip, + protocol=protocol, + cache_control=cache_control, + content_disposition=content_disposition, + content_encoding=content_encoding, + content_language=content_language, + content_type=content_type, + ) + + def list_containers(self, prefix=None, num_results=None, include_metadata=False, + marker=None, timeout=None): + ''' + Returns a generator to list the containers under the specified account. + The generator will lazily follow the continuation tokens returned by + the service and stop when all containers have been returned or num_results is reached. + + If num_results is specified and the account has more than that number of + containers, the generator will have a populated next_marker field once it + finishes. This marker can be used to create a new generator if more + results are desired. + + :param str prefix: + Filters the results to return only containers whose names + begin with the specified prefix. 
+ :param int num_results: + Specifies the maximum number of containers to return. A single list + request may return up to 1000 contianers and potentially a continuation + token which should be followed to get additional resutls. + :param bool include_metadata: + Specifies that container metadata be returned in the response. + :param str marker: + An opaque continuation token. This value can be retrieved from the + next_marker field of a previous generator object if num_results was + specified and that generator has finished enumerating results. If + specified, this generator will begin returning results from the point + where the previous generator stopped. + :param int timeout: + The timeout parameter is expressed in seconds. + ''' + include = 'metadata' if include_metadata else None + operation_context = _OperationContext(location_lock=True) + kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, + 'include': include, 'timeout': timeout, '_context': operation_context} + resp = self._list_containers(**kwargs) + + return ListGenerator(resp, self._list_containers, (), kwargs) + + def _list_containers(self, prefix=None, marker=None, max_results=None, + include=None, timeout=None, _context=None): + ''' + Returns a list of the containers under the specified account. + + :param str prefix: + Filters the results to return only containers whose names + begin with the specified prefix. + :param str marker: + A string value that identifies the portion of the list + to be returned with the next list operation. The operation returns + a next_marker value within the response body if the list returned was + not complete. The marker value may then be used in a subsequent + call to request the next set of list items. The marker value is + opaque to the client. + :param int max_results: + Specifies the maximum number of containers to return. 
A single list
+ request may return up to 1000 containers and potentially a continuation
+ token which should be followed to get additional results.
+ :param str include:
+ Include this parameter to specify that the container's
+ metadata be returned as part of the response body. Set this
+ parameter to string 'metadata' to get container's metadata.
+ :param int timeout:
+ The timeout parameter is expressed in seconds.
+ '''
+ request = HTTPRequest()
+ request.method = 'GET'
+ request.host_locations = self._get_host_locations(secondary=True)
+ request.path = _get_path()
+ request.query = {
+ 'comp': 'list',
+ 'prefix': _to_str(prefix),
+ 'marker': _to_str(marker),
+ 'maxresults': _int_to_str(max_results),
+ 'include': _to_str(include),
+ 'timeout': _int_to_str(timeout)
+ }
+
+ return self._perform_request(request, _convert_xml_to_containers, operation_context=_context)
+
+ def create_container(self, container_name, metadata=None,
+ public_access=None, fail_on_exist=False, timeout=None):
+ '''
+ Creates a new container under the specified account. If the container
+ with the same name already exists, the operation fails if
+ fail_on_exist is True.
+
+ :param str container_name:
+ Name of container to create.
+ :param metadata:
+ A dict with name_value pairs to associate with the
+ container as metadata. Example:{'Category':'test'}
+ :type metadata: dict(str, str)
+ :param ~azure.storage.blob.models.PublicAccess public_access:
+ Possible values include: container, blob.
+ :param bool fail_on_exist:
+ Specify whether to throw an exception when the container exists.
+ :param int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: True if container is created, False if container already exists. 
+ :rtype: bool + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-blob-public-access': _to_str(public_access) + } + _add_metadata_headers(metadata, request) + + if not fail_on_exist: + try: + self._perform_request(request, expected_errors=[_CONTAINER_ALREADY_EXISTS_ERROR_CODE]) + return True + except AzureHttpError as ex: + _dont_fail_on_exist(ex) + return False + else: + self._perform_request(request) + return True + + def get_container_properties(self, container_name, lease_id=None, timeout=None): + ''' + Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :param str container_name: + Name of existing container. + :param str lease_id: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: properties for the specified container within a container object. + :rtype: :class:`~azure.storage.blob.models.Container` + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'timeout': _int_to_str(timeout), + } + request.headers = {'x-ms-lease-id': _to_str(lease_id)} + + return self._perform_request(request, _parse_container, [container_name]) + + def get_container_metadata(self, container_name, lease_id=None, timeout=None): + ''' + Returns all user-defined metadata for the specified container. + + :param str container_name: + Name of existing container. 
+ :param str lease_id: + If specified, get_container_metadata only succeeds if the + container's lease is active and matches this ID. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: + A dictionary representing the container metadata name, value pairs. + :rtype: dict(str, str) + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'comp': 'metadata', + 'timeout': _int_to_str(timeout), + } + request.headers = {'x-ms-lease-id': _to_str(lease_id)} + + return self._perform_request(request, _parse_metadata) + + def set_container_metadata(self, container_name, metadata=None, + lease_id=None, if_modified_since=None, timeout=None): + ''' + Sets one or more user-defined name-value pairs for the specified + container. Each call to this operation replaces all existing metadata + attached to the container. To remove all metadata from the container, + call this operation with no metadata dict. + + :param str container_name: + Name of existing container. + :param metadata: + A dict containing name-value pairs to associate with the container as + metadata. Example: {'category':'test'} + :type metadata: dict(str, str) + :param str lease_id: + If specified, set_container_metadata only succeeds if the + container's lease is active and matches this ID. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the updated Container + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'comp': 'metadata', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'x-ms-lease-id': _to_str(lease_id), + } + _add_metadata_headers(metadata, request) + + return self._perform_request(request, _parse_base_properties) + + def get_container_acl(self, container_name, lease_id=None, timeout=None): + ''' + Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :param str container_name: + Name of existing container. + :param lease_id: + If specified, get_container_acl only succeeds if the + container's lease is active and matches this ID. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: A dictionary of access policies associated with the container. 
dict of str to + :class:`..common.models.AccessPolicy` and a public_access property + if public access is turned on + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'comp': 'acl', + 'timeout': _int_to_str(timeout), + } + request.headers = {'x-ms-lease-id': _to_str(lease_id)} + + return self._perform_request(request, _convert_xml_to_signed_identifiers_and_access) + + def set_container_acl(self, container_name, signed_identifiers=None, + public_access=None, lease_id=None, + if_modified_since=None, if_unmodified_since=None, timeout=None): + ''' + Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param str container_name: + Name of existing container. + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict(str, :class:`~..common.models.AccessPolicy`) + :param ~azure.storage.blob.models.PublicAccess public_access: + Possible values include: container, blob. + :param str lease_id: + If specified, set_container_acl only succeeds if the + container's lease is active and matches this ID. + :param datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. 
+ :param datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: ETag and last modified properties for the updated Container + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_access_policies(signed_identifiers) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'comp': 'acl', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-blob-public-access': _to_str(public_access), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'x-ms-lease-id': _to_str(lease_id), + } + request.body = _get_request_body( + _convert_signed_identifiers_to_xml(signed_identifiers)) + + return self._perform_request(request, _parse_base_properties) + + def delete_container(self, container_name, fail_not_exist=False, + lease_id=None, if_modified_since=None, + if_unmodified_since=None, timeout=None): + ''' + Marks the specified container for deletion. The container and any blobs + contained within it are later deleted during garbage collection. + + :param str container_name: + Name of container to delete. + :param bool fail_not_exist: + Specify whether to throw an exception when the container doesn't + exist. + :param str lease_id: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. 
+ Required if the container has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: True if container is deleted, False container doesn't exist. + :rtype: bool + ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + } + + if not fail_not_exist: + try: + self._perform_request(request, expected_errors=[_CONTAINER_NOT_FOUND_ERROR_CODE]) + return True + except AzureHttpError as ex: + _dont_fail_not_exist(ex) + return False + else: + self._perform_request(request) + return True + + def _lease_container_impl( + self, container_name, lease_action, lease_id, lease_duration, + lease_break_period, proposed_lease_id, if_modified_since, + if_unmodified_since, timeout): + ''' + Establishes and manages a lease on a container. 
+ The Lease Container operation can be called in one of five modes + Acquire, to request a new lease + Renew, to renew an existing lease + Change, to change the ID of an existing lease + Release, to free the lease if it is no longer needed so that another + client may immediately acquire a lease against the container + Break, to end the lease but ensure that another client cannot acquire + a new lease until the current lease period has expired + + :param str container_name: + Name of existing container. + :param str lease_action: + Possible _LeaseActions values: acquire|renew|release|break|change + :param str lease_id: + Required if the container has an active lease. + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. For backwards compatibility, the default is + 60, and the value is only used on an acquire operation. + :param int lease_break_period: + For a break operation, this is the proposed duration of + seconds that the lease should continue before it is broken, between + 0 and 60 seconds. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining + on the lease is used. A new lease will not be available before the + break period has expired, but the lease may be held for longer than + the break period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :param str proposed_lease_id: + Optional for Acquire, required for Change. Proposed lease ID, in a + GUID string format. The Blob service returns 400 (Invalid request) + if the proposed lease ID is not in the correct format. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: + Response headers returned from the service call. + :rtype: dict(str, str) + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('lease_action', lease_action) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'comp': 'lease', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-lease-action': _to_str(lease_action), + 'x-ms-lease-duration': _to_str(lease_duration), + 'x-ms-lease-break-period': _to_str(lease_break_period), + 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + } + + return self._perform_request(request, _parse_lease) + + def acquire_container_lease( + self, container_name, lease_duration=-1, proposed_lease_id=None, + if_modified_since=None, if_unmodified_since=None, timeout=None): + ''' + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. 
+ + :param str container_name: + Name of existing container. + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: the lease ID of the newly created lease. 
+ :rtype: str
+ '''
+ _validate_not_none('lease_duration', lease_duration)
+ if lease_duration != -1 and \
+ (lease_duration < 15 or lease_duration > 60):
+ raise ValueError(_ERROR_INVALID_LEASE_DURATION)
+
+ lease = self._lease_container_impl(container_name,
+ _LeaseActions.Acquire,
+ None, # lease_id
+ lease_duration,
+ None, # lease_break_period
+ proposed_lease_id,
+ if_modified_since,
+ if_unmodified_since,
+ timeout)
+ return lease['id']
+
+ def renew_container_lease(
+ self, container_name, lease_id, if_modified_since=None,
+ if_unmodified_since=None, timeout=None):
+ '''
+ Renews the lease. The lease can be renewed if the lease ID specified
+ matches that associated with the container. Note that
+ the lease may be renewed even if it has expired as long as the container
+ has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
+
+ :param str container_name:
+ Name of existing container.
+ :param str lease_id:
+ Lease ID for active lease.
+ :param datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :param datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :param int timeout:
+ The timeout parameter is expressed in seconds.
+ :return: the lease ID of the renewed lease. 
+ :return: str + ''' + _validate_not_none('lease_id', lease_id) + + lease = self._lease_container_impl(container_name, + _LeaseActions.Renew, + lease_id, + None, # lease_duration + None, # lease_break_period + None, # proposed_lease_id + if_modified_since, + if_unmodified_since, + timeout) + return lease['id'] + + def release_container_lease( + self, container_name, lease_id, if_modified_since=None, + if_unmodified_since=None, timeout=None): + ''' + Release the lease. The lease may be released if the lease_id specified matches + that associated with the container. Releasing the lease allows another client + to immediately acquire the lease for the container as soon as the release is complete. + + :param str container_name: + Name of existing container. + :param str lease_id: + Lease ID for active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ ''' + _validate_not_none('lease_id', lease_id) + + self._lease_container_impl(container_name, + _LeaseActions.Release, + lease_id, + None, # lease_duration + None, # lease_break_period + None, # proposed_lease_id + if_modified_since, + if_unmodified_since, + timeout) + + def break_container_lease( + self, container_name, lease_break_period=None, + if_modified_since=None, if_unmodified_since=None, timeout=None): + ''' + Break the lease, if the container has an active lease. Once a lease is + broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the container. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param str container_name: + Name of existing container. + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :return: int + ''' + if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): + raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) + + lease = self._lease_container_impl(container_name, + _LeaseActions.Break, + None, # lease_id + None, # lease_duration + lease_break_period, + None, # proposed_lease_id + if_modified_since, + if_unmodified_since, + timeout) + return lease['time'] + + def change_container_lease( + self, container_name, lease_id, proposed_lease_id, + if_modified_since=None, if_unmodified_since=None, timeout=None): + ''' + Change the lease ID of an active lease. A change must include the current + lease ID and a new lease ID. + + :param str container_name: + Name of existing container. + :param str lease_id: + Lease ID for active lease. + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param int timeout: + The timeout parameter is expressed in seconds. + ''' + _validate_not_none('lease_id', lease_id) + + self._lease_container_impl(container_name, + _LeaseActions.Change, + lease_id, + None, # lease_duration + None, # lease_break_period + proposed_lease_id, + if_modified_since, + if_unmodified_since, + timeout) + + def list_blobs(self, container_name, prefix=None, num_results=None, include=None, + delimiter=None, marker=None, timeout=None): + ''' + Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service and stop when all blobs have been returned or num_results is reached. + + If num_results is specified and the account has more than that number of + blobs, the generator will have a populated next_marker field once it + finishes. This marker can be used to create a new generator if more + results are desired. + + :param str container_name: + Name of existing container. + :param str prefix: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param int num_results: + Specifies the maximum number of blobs to return, + including all :class:`BlobPrefix` elements. If the request does not specify + num_results or specifies a value greater than 5,000, the server will + return up to 5,000 items. Setting num_results to a value less than + or equal to zero results in error response code 400 (Bad Request). + :param ~azure.storage.blob.models.Include include: + Specifies one or more additional datasets to include in the response. 
+ :param str delimiter:
+ When the request includes this parameter, the operation
+ returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the
+ result list that acts as a placeholder for all blobs whose names begin
+ with the same substring up to the appearance of the delimiter character.
+ The delimiter may be a single character or a string.
+ :param str marker:
+ An opaque continuation token. This value can be retrieved from the
+ next_marker field of a previous generator object if num_results was
+ specified and that generator has finished enumerating results. If
+ specified, this generator will begin returning results from the point
+ where the previous generator stopped.
+ :param int timeout:
+ The timeout parameter is expressed in seconds.
+ '''
+ operation_context = _OperationContext(location_lock=True)
+ args = (container_name,)
+ kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results,
+ 'include': include, 'delimiter': delimiter, 'timeout': timeout,
+ '_context': operation_context}
+ resp = self._list_blobs(*args, **kwargs)
+
+ return ListGenerator(resp, self._list_blobs, args, kwargs)
+
+ def _list_blobs(self, container_name, prefix=None, marker=None,
+ max_results=None, include=None, delimiter=None, timeout=None,
+ _context=None):
+ '''
+ Returns the list of blobs under the specified container.
+
+ :param str container_name:
+ Name of existing container.
+ :param str prefix:
+ Filters the results to return only blobs whose names
+ begin with the specified prefix.
+ :param str marker:
+ A string value that identifies the portion of the list
+ to be returned with the next list operation. The operation returns
+ a next_marker value within the response body if the list returned was
+ not complete. The marker value may then be used in a subsequent
+ call to request the next set of list items. The marker value is
+ opaque to the client. 
+ :param int max_results: + Specifies the maximum number of blobs to return, + including all :class:`~azure.storage.blob.models.BlobPrefix` elements. If the request does not specify + max_results or specifies a value greater than 5,000, the server will + return up to 5,000 items. Setting max_results to a value less than + or equal to zero results in error response code 400 (Bad Request). + :param str include: + Specifies one or more datasets to include in the + response. To specify more than one of these options on the URI, + you must separate each option with a comma. Valid values are: + snapshots: + Specifies that snapshots should be included in the + enumeration. Snapshots are listed from oldest to newest in + the response. + metadata: + Specifies that blob metadata be returned in the response. + uncommittedblobs: + Specifies that blobs for which blocks have been uploaded, + but which have not been committed using Put Block List + (REST API), be included in the response. + copy: + Version 2012-02-12 and newer. Specifies that metadata + related to any current or previous Copy Blob operation + should be included in the response. + deleted: + Version 2017-07-29 and newer. Specifies that soft deleted blobs + which are retained by the service should be included + in the response. + :param str delimiter: + When the request includes this parameter, the operation + returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the response body that acts as a + placeholder for all blobs whose names begin with the same + substring up to the appearance of the delimiter character. The + delimiter may be a single character or a string. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ ''' + _validate_not_none('container_name', container_name) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name) + request.query = { + 'restype': 'container', + 'comp': 'list', + 'prefix': _to_str(prefix), + 'delimiter': _to_str(delimiter), + 'marker': _to_str(marker), + 'maxresults': _int_to_str(max_results), + 'include': _to_str(include), + 'timeout': _int_to_str(timeout), + } + + return self._perform_request(request, _convert_xml_to_blob_list, operation_context=_context) + + def get_blob_account_information(self, container_name=None, blob_name=None, timeout=None): + """ + Gets information related to the storage account. + The information can also be retrieved if the user has a SAS to a container or blob. + + :param str container_name: + Name of existing container. + Optional, unless using a SAS token to a specific container or blob, in which case it's required. + :param str blob_name: + Name of existing blob. + Optional, unless using a SAS token to a specific blob, in which case it's required. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: The :class:`~azure.storage.blob.models.AccountInformation`. + """ + request = HTTPRequest() + request.method = 'HEAD' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'restype': 'account', + 'comp': 'properties', + 'timeout': _int_to_str(timeout), + } + + return self._perform_request(request, _parse_account_information) + + def get_blob_service_stats(self, timeout=None): + ''' + Retrieves statistics related to replication for the Blob service. It is + only available when read-access geo-redundant replication is enabled for + the storage account. + + With geo-redundant replication, Azure Storage maintains your data durable + in two locations. 
In both locations, Azure Storage constantly maintains + multiple healthy replicas of your data. The location where you read, + create, update, or delete data is the primary storage account location. + The primary location exists in the region you choose at the time you + create an account via the Azure Management Azure classic portal, for + example, North Central US. The location to which your data is replicated + is the secondary location. The secondary location is automatically + determined based on the location of the primary; it is in a second data + center that resides in the same region as the primary location. Read-only + access is available from the secondary location, if read-access geo-redundant + replication is enabled for your storage account. + + :param int timeout: + The timeout parameter is expressed in seconds. + :return: The blob service stats. + :rtype: :class:`~..common.models.ServiceStats` + ''' + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(primary=False, secondary=True) + request.path = _get_path() + request.query = { + 'restype': 'service', + 'comp': 'stats', + 'timeout': _int_to_str(timeout), + } + + return self._perform_request(request, _convert_xml_to_service_stats) + + def set_blob_service_properties( + self, logging=None, hour_metrics=None, minute_metrics=None, + cors=None, target_version=None, timeout=None, delete_retention_policy=None, static_website=None): + ''' + Sets the properties of a storage account's Blob service, including + Azure Storage Analytics. If an element (ex Logging) is left as None, the + existing settings on the service for that functionality are preserved. + + :param logging: + Groups the Azure Analytics Logging settings. + :type logging: + :class:`~..common.models.Logging` + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for blobs. 
+ :type hour_metrics: + :class:`~..common.models.Metrics` + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for blobs. + :type minute_metrics: + :class:`~..common.models.Metrics` + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~..common.models.CorsRule`) + :param str target_version: + Indicates the default version to use for requests if an incoming + request's version is not specified. + :param int timeout: + The timeout parameter is expressed in seconds. + :param delete_retention_policy: + The delete retention policy specifies whether to retain deleted blobs. + It also specifies the number of days and versions of blob to keep. + :type delete_retention_policy: + :class:`~..common.models.DeleteRetentionPolicy` + :param static_website: + Specifies whether the static website feature is enabled, + and if yes, indicates the index document and 404 error document to use. + :type static_website: + :class:`~..common.models.StaticWebsite` + ''' + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path() + request.query = { + 'restype': 'service', + 'comp': 'properties', + 'timeout': _int_to_str(timeout), + } + request.body = _get_request_body( + _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, + cors, target_version, delete_retention_policy, static_website)) + + self._perform_request(request) + + def get_blob_service_properties(self, timeout=None): + ''' + Gets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + :param int timeout: + The timeout parameter is expressed in seconds. + :return: The blob :class:`~..common.models.ServiceProperties` with an attached + target_version property. 
+ ''' + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path() + request.query = { + 'restype': 'service', + 'comp': 'properties', + 'timeout': _int_to_str(timeout), + } + + return self._perform_request(request, _convert_xml_to_service_properties) + + def get_blob_properties( + self, container_name, blob_name, snapshot=None, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + Returns :class:`~azure.storage.blob.models.Blob` + with :class:`~azure.storage.blob.models.BlobProperties` and a metadata dict. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). 
Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: a blob object including properties and metadata. + :rtype: :class:`~azure.storage.blob.models.Blob` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'HEAD' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'snapshot': _to_str(snapshot), + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + + return self._perform_request(request, _parse_blob, [blob_name, snapshot]) + + def set_blob_properties( + self, container_name, blob_name, content_settings=None, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Sets system properties on the blob. If one property is set for the + content_settings, all properties will be overriden. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param str lease_id: + Required if the blob has an active lease. 
+ :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the updated Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'properties', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + 'x-ms-lease-id': _to_str(lease_id) + } + if content_settings is not None: + request.headers.update(content_settings._to_headers()) + + return self._perform_request(request, _parse_base_properties) + + def exists(self, container_name, blob_name=None, snapshot=None, timeout=None): + ''' + Returns a boolean indicating whether the container exists (if blob_name + is None), or otherwise a boolean indicating whether the blob exists. + + :param str container_name: + Name of a container. + :param str blob_name: + Name of a blob. If None, the container will be checked for existence. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the snapshot. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: A boolean indicating whether the resource exists. 
+ :rtype: bool + ''' + _validate_not_none('container_name', container_name) + try: + # make head request to see if container/blob/snapshot exists + request = HTTPRequest() + request.method = 'GET' if blob_name is None else 'HEAD' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'snapshot': _to_str(snapshot), + 'timeout': _int_to_str(timeout), + 'restype': 'container' if blob_name is None else None, + } + + expected_errors = [_CONTAINER_NOT_FOUND_ERROR_CODE] if blob_name is None \ + else [_CONTAINER_NOT_FOUND_ERROR_CODE, _BLOB_NOT_FOUND_ERROR_CODE] + self._perform_request(request, expected_errors=expected_errors) + + return True + except AzureHttpError as ex: + _dont_fail_not_exist(ex) + return False + + def _get_blob( + self, container_name, blob_name, snapshot=None, start_range=None, + end_range=None, validate_content=False, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, + _context=None): + ''' + Downloads a blob's content, metadata, and properties. You can also + call this API to read a snapshot. You can specify a range if you don't + need to download the blob in its entirety. If no range is specified, + the full blob will be downloaded. + + See get_blob_to_* for high level functions that handle the download + of large blobs with automatic chunking and progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + :param int start_range: + Start of byte range to use for downloading a section of the blob. + If no end_range is given, all bytes after the start_range will be downloaded. + The start_range and end_range params are inclusive. 
+ Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param int end_range: + End of byte range to use for downloading a section of the blob. + If end_range is given, start_range must be provided. + The start_range and end_range params are inclusive. + Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param bool validate_content: + When this is set to True and specified together with the Range header, + the service returns the MD5 hash for the range, as long as the range + is less than or equal to 4 MB in size. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: A Blob with content, properties, and metadata. + :rtype: :class:`~azure.storage.blob.models.Blob` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_decryption_required(self.require_encryption, + self.key_encryption_key, + self.key_resolver_function) + + start_offset, end_offset = 0, 0 + if self.key_encryption_key is not None or self.key_resolver_function is not None: + if start_range is not None: + # Align the start of the range along a 16 byte block + start_offset = start_range % 16 + start_range -= start_offset + + # Include an extra 16 bytes for the IV if necessary + # Because of the previous offsetting, start_range will always + # be a multiple of 16. + if start_range > 0: + start_offset += 16 + start_range -= 16 + + if end_range is not None: + # Align the end of the range along a 16 byte block + end_offset = 15 - (end_range % 16) + end_range += end_offset + + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'snapshot': _to_str(snapshot), + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + _validate_and_format_range_headers( + request, + start_range, + end_range, + start_range_required=False, + end_range_required=False, + check_content_md5=validate_content) + + return self._perform_request(request, _parse_blob, + [blob_name, snapshot, validate_content, self.require_encryption, + self.key_encryption_key, self.key_resolver_function, + start_offset, end_offset], + operation_context=_context) + + def get_blob_to_path( + self, container_name, blob_name, file_path, open_mode='wb', + 
snapshot=None, start_range=None, end_range=None, + validate_content=False, progress_callback=None, + max_connections=2, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, + timeout=None): + ''' + Downloads a blob to a file path, with automatic chunking and progress + notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with + properties and metadata. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str file_path: + Path of file to write out to. + :param str open_mode: + Mode to use when opening the file. Note that specifying append only + open_mode prevents parallel download. So, max_connections must be set + to 1 if this open_mode is used. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + :param int start_range: + Start of byte range to use for downloading a section of the blob. + If no end_range is given, all bytes after the start_range will be downloaded. + The start_range and end_range params are inclusive. + Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param int end_range: + End of byte range to use for downloading a section of the blob. + If end_range is given, start_range must be provided. + The start_range and end_range params are inclusive. + Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param bool validate_content: + If set to true, validates an MD5 hash for each retrieved portion of + the blob. This is primarily valuable for detecting bitflips on the wire + if using http instead of https as https (the default) will already + validate. Note that the service will only return transactional MD5s + for chunks 4MB or less so the first get request will be of size + self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. 
If + self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be + thrown. As computing the MD5 takes processing time and more requests + will need to be done due to the reduced chunk size there may be some + increase in latency. + :param progress_callback: + Callback for progress with signature function(current, total) + where current is the number of bytes transfered so far, and total is + the size of the blob if known. + :type progress_callback: func(current, total) + :param int max_connections: + If set to 2 or greater, an initial get will be done for the first + self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, + the method returns at this point. If it is not, it will download the + remaining data parallel using the number of threads equal to + max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. + If set to 1, a single large get request will be done. This is not + generally recommended but available if very few threads should be + used, network requests are very expensive, or a non-seekable stream + prevents parallel download. This may also be useful if many blobs are + expected to be empty as an extra request is required for empty blobs + if max_connections is greater than 1. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :return: A Blob with properties and metadata. If max_connections is greater + than 1, the content_md5 (if set on the blob) will not be returned. If you + require this value, either use get_blob_properties or set max_connections + to 1. 
+ :rtype: :class:`~azure.storage.blob.models.Blob` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('file_path', file_path) + _validate_not_none('open_mode', open_mode) + + if max_connections > 1 and 'a' in open_mode: + raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) + + with open(file_path, open_mode) as stream: + blob = self.get_blob_to_stream( + container_name, + blob_name, + stream, + snapshot, + start_range, + end_range, + validate_content, + progress_callback, + max_connections, + lease_id, + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + + return blob + + def get_blob_to_stream( + self, container_name, blob_name, stream, snapshot=None, + start_range=None, end_range=None, validate_content=False, + progress_callback=None, max_connections=2, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + + ''' + Downloads a blob to a stream, with automatic chunking and progress + notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with + properties and metadata. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param io.IOBase stream: + Opened stream to write to. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + :param int start_range: + Start of byte range to use for downloading a section of the blob. + If no end_range is given, all bytes after the start_range will be downloaded. + The start_range and end_range params are inclusive. + Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param int end_range: + End of byte range to use for downloading a section of the blob. + If end_range is given, start_range must be provided. + The start_range and end_range params are inclusive. 
+ Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param bool validate_content: + If set to true, validates an MD5 hash for each retrieved portion of + the blob. This is primarily valuable for detecting bitflips on the wire + if using http instead of https as https (the default) will already + validate. Note that the service will only return transactional MD5s + for chunks 4MB or less so the first get request will be of size + self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If + self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be + thrown. As computing the MD5 takes processing time and more requests + will need to be done due to the reduced chunk size there may be some + increase in latency. + :param progress_callback: + Callback for progress with signature function(current, total) + where current is the number of bytes transfered so far, and total is + the size of the blob if known. + :type progress_callback: func(current, total) + :param int max_connections: + If set to 2 or greater, an initial get will be done for the first + self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, + the method returns at this point. If it is not, it will download the + remaining data parallel using the number of threads equal to + max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. + If set to 1, a single large get request will be done. This is not + generally recommended but available if very few threads should be + used, network requests are very expensive, or a non-seekable stream + prevents parallel download. This may also be useful if many blobs are + expected to be empty as an extra request is required for empty blobs + if max_connections is greater than 1. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :return: A Blob with properties and metadata. If max_connections is greater + than 1, the content_md5 (if set on the blob) will not be returned. If you + require this value, either use get_blob_properties or set max_connections + to 1. 
+ :rtype: :class:`~azure.storage.blob.models.Blob` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('stream', stream) + + if end_range is not None: + _validate_not_none("start_range", start_range) + + # the stream must be seekable if parallel download is required + if max_connections > 1: + if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) + else: + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + first_get_size = self.MAX_SINGLE_GET_SIZE if not validate_content else self.MAX_CHUNK_GET_SIZE + + initial_request_start = start_range if start_range is not None else 0 + + if end_range is not None and end_range - start_range < first_get_size: + initial_request_end = end_range + else: + initial_request_end = initial_request_start + first_get_size - 1 + + # Send a context object to make sure we always retry to the initial location + operation_context = _OperationContext(location_lock=True) + try: + blob = self._get_blob(container_name, + blob_name, + snapshot, + start_range=initial_request_start, + end_range=initial_request_end, + validate_content=validate_content, + lease_id=lease_id, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout, + _context=operation_context) + + # Parse the total blob size and adjust the download size if ranges + # were specified + blob_size = _parse_length_from_content_range(blob.properties.content_range) + if end_range is not None: + # Use the end_range unless it is over the end of the blob + download_size = min(blob_size, end_range - 
start_range + 1) + elif start_range is not None: + download_size = blob_size - start_range + else: + download_size = blob_size + except AzureHttpError as ex: + if start_range is None and ex.status_code == 416: + # Get range will fail on an empty blob. If the user did not + # request a range, do a regular get request in order to get + # any properties. + blob = self._get_blob(container_name, + blob_name, + snapshot, + validate_content=validate_content, + lease_id=lease_id, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout, + _context=operation_context) + + # Set the download size to empty + download_size = 0 + else: + raise ex + + # Mark the first progress chunk. If the blob is small or this is a single + # shot download, this is the only call + if progress_callback: + progress_callback(blob.properties.content_length, download_size) + + # Write the content to the user stream + # Clear blob content since output has been written to user stream + if blob.content is not None: + stream.write(blob.content) + blob.content = None + + # If the blob is small, the download is complete at this point. + # If blob size is large, download the rest of the blob in chunks. + if blob.properties.content_length != download_size: + # Lock on the etag. 
This can be overriden by the user by specifying '*' + if_match = if_match if if_match is not None else blob.properties.etag + + end_blob = blob_size + if end_range is not None: + # Use the end_range unless it is over the end of the blob + end_blob = min(blob_size, end_range + 1) + + _download_blob_chunks( + self, + container_name, + blob_name, + snapshot, + download_size, + self.MAX_CHUNK_GET_SIZE, + first_get_size, + initial_request_end + 1, # start where the first download ended + end_blob, + stream, + max_connections, + progress_callback, + validate_content, + lease_id, + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout, + operation_context + ) + + # Set the content length to the download size instead of the size of + # the last range + blob.properties.content_length = download_size + + # Overwrite the content range to the user requested range + blob.properties.content_range = 'bytes {0}-{1}/{2}'.format(start_range, end_range, blob_size) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + blob.properties.content_md5 = None + + return blob + + def get_blob_to_bytes( + self, container_name, blob_name, snapshot=None, + start_range=None, end_range=None, validate_content=False, + progress_callback=None, max_connections=2, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Downloads a blob as an array of bytes, with automatic chunking and + progress notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with + properties, metadata, and content. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. 
+ :param int start_range: + Start of byte range to use for downloading a section of the blob. + If no end_range is given, all bytes after the start_range will be downloaded. + The start_range and end_range params are inclusive. + Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param int end_range: + End of byte range to use for downloading a section of the blob. + If end_range is given, start_range must be provided. + The start_range and end_range params are inclusive. + Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param bool validate_content: + If set to true, validates an MD5 hash for each retrieved portion of + the blob. This is primarily valuable for detecting bitflips on the wire + if using http instead of https as https (the default) will already + validate. Note that the service will only return transactional MD5s + for chunks 4MB or less so the first get request will be of size + self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If + self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be + thrown. As computing the MD5 takes processing time and more requests + will need to be done due to the reduced chunk size there may be some + increase in latency. + :param progress_callback: + Callback for progress with signature function(current, total) + where current is the number of bytes transfered so far, and total is + the size of the blob if known. + :type progress_callback: func(current, total) + :param int max_connections: + If set to 2 or greater, an initial get will be done for the first + self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, + the method returns at this point. If it is not, it will download the + remaining data parallel using the number of threads equal to + max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. + If set to 1, a single large get request will be done. 
This is not + generally recommended but available if very few threads should be + used, network requests are very expensive, or a non-seekable stream + prevents parallel download. This may also be useful if many blobs are + expected to be empty as an extra request is required for empty blobs + if max_connections is greater than 1. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :return: A Blob with properties and metadata. If max_connections is greater + than 1, the content_md5 (if set on the blob) will not be returned. 
If you + require this value, either use get_blob_properties or set max_connections + to 1. + :rtype: :class:`~azure.storage.blob.models.Blob` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + + stream = BytesIO() + blob = self.get_blob_to_stream( + container_name, + blob_name, + stream, + snapshot, + start_range, + end_range, + validate_content, + progress_callback, + max_connections, + lease_id, + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + + blob.content = stream.getvalue() + return blob + + def get_blob_to_text( + self, container_name, blob_name, encoding='utf-8', snapshot=None, + start_range=None, end_range=None, validate_content=False, + progress_callback=None, max_connections=2, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Downloads a blob as unicode text, with automatic chunking and progress + notifications. Returns an instance of :class:`~azure.storage.blob.models.Blob` with + properties, metadata, and content. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str encoding: + Python encoding to use when decoding the blob data. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve. + :param int start_range: + Start of byte range to use for downloading a section of the blob. + If no end_range is given, all bytes after the start_range will be downloaded. + The start_range and end_range params are inclusive. + Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param int end_range: + End of byte range to use for downloading a section of the blob. + If end_range is given, start_range must be provided. + The start_range and end_range params are inclusive. 
+ Ex: start_range=0, end_range=511 will download first 512 bytes of blob. + :param bool validate_content: + If set to true, validates an MD5 hash for each retrieved portion of + the blob. This is primarily valuable for detecting bitflips on the wire + if using http instead of https as https (the default) will already + validate. Note that the service will only return transactional MD5s + for chunks 4MB or less so the first get request will be of size + self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If + self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be + thrown. As computing the MD5 takes processing time and more requests + will need to be done due to the reduced chunk size there may be some + increase in latency. + :param progress_callback: + Callback for progress with signature function(current, total) + where current is the number of bytes transfered so far, and total is + the size of the blob if known. + :type progress_callback: func(current, total) + :param int max_connections: + If set to 2 or greater, an initial get will be done for the first + self.MAX_SINGLE_GET_SIZE bytes of the blob. If this is the entire blob, + the method returns at this point. If it is not, it will download the + remaining data parallel using the number of threads equal to + max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. + If set to 1, a single large get request will be done. This is not + generally recommended but available if very few threads should be + used, network requests are very expensive, or a non-seekable stream + prevents parallel download. This may also be useful if many blobs are + expected to be empty as an extra request is required for empty blobs + if max_connections is greater than 1. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :return: A Blob with properties and metadata. If max_connections is greater + than 1, the content_md5 (if set on the blob) will not be returned. If you + require this value, either use get_blob_properties or set max_connections + to 1. 
+ :rtype: :class:`~azure.storage.blob.models.Blob` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('encoding', encoding) + + blob = self.get_blob_to_bytes(container_name, + blob_name, + snapshot, + start_range, + end_range, + validate_content, + progress_callback, + max_connections, + lease_id, + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + blob.content = blob.content.decode(encoding) + return blob + + def get_blob_metadata( + self, container_name, blob_name, snapshot=None, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Returns all user-defined metadata for the specified blob or snapshot. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str snapshot: + The snapshot parameter is an opaque value that, + when present, specifies the blob snapshot to retrieve. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). 
Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: + A dictionary representing the blob metadata name, value pairs. + :rtype: dict(str, str) + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'snapshot': _to_str(snapshot), + 'comp': 'metadata', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + + return self._perform_request(request, _parse_metadata) + + def set_blob_metadata(self, container_name, blob_name, + metadata=None, lease_id=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + ''' + Sets user-defined metadata for the specified blob as one or more + name-value pairs. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. 
+ :type metadata: dict(str, str) + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the updated Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'metadata', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + 'x-ms-lease-id': _to_str(lease_id), + } + _add_metadata_headers(metadata, request) + + return self._perform_request(request, _parse_base_properties) + + def _lease_blob_impl(self, container_name, blob_name, + lease_action, lease_id, + lease_duration, lease_break_period, + proposed_lease_id, if_modified_since, + if_unmodified_since, if_match, if_none_match, timeout=None): + ''' + Establishes and manages a lease on a blob for write and delete operations. + The Lease Blob operation can be called in one of five modes: + Acquire, to request a new lease. + Renew, to renew an existing lease. + Change, to change the ID of an existing lease. + Release, to free the lease if it is no longer needed so that another + client may immediately acquire a lease against the blob. + Break, to end the lease but ensure that another client cannot acquire + a new lease until the current lease period has expired. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str lease_action: + Possible _LeaseActions acquire|renew|release|break|change + :param str lease_id: + Required if the blob has an active lease. 
+ :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. + :param int lease_break_period: + For a break operation, this is the proposed duration of + seconds that the lease should continue before it is broken, between + 0 and 60 seconds. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining + on the lease is used. A new lease will not be available before the + break period has expired, but the lease may be held for longer than + the break period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :param str proposed_lease_id: + Optional for acquire, required for change. Proposed lease ID, in a + GUID string format. The Blob service returns 400 (Invalid request) + if the proposed lease ID is not in the correct format. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). 
Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: + Response headers returned from the service call. + :rtype: dict(str, str) + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('lease_action', lease_action) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'lease', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-lease-action': _to_str(lease_action), + 'x-ms-lease-duration': _to_str(lease_duration), + 'x-ms-lease-break-period': _to_str(lease_break_period), + 'x-ms-proposed-lease-id': _to_str(proposed_lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + + return self._perform_request(request, _parse_lease) + + def acquire_blob_lease(self, container_name, blob_name, + lease_duration=-1, + proposed_lease_id=None, + if_modified_since=None, + if_unmodified_since=None, + if_match=None, + if_none_match=None, timeout=None): + ''' + Requests a new lease. If the blob does not have an active lease, the Blob + service creates a lease on the blob and returns a new lease ID. + + :param str container_name: + Name of existing container. 
+ :param str blob_name: + Name of existing blob. + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: the lease ID of the newly created lease. 
+ :return: str + ''' + _validate_not_none('lease_duration', lease_duration) + + if lease_duration is not -1 and \ + (lease_duration < 15 or lease_duration > 60): + raise ValueError(_ERROR_INVALID_LEASE_DURATION) + lease = self._lease_blob_impl(container_name, + blob_name, + _LeaseActions.Acquire, + None, # lease_id + lease_duration, + None, # lease_break_period + proposed_lease_id, + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + return lease['id'] + + def renew_blob_lease(self, container_name, blob_name, + lease_id, if_modified_since=None, + if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Renews the lease. The lease can be renewed if the lease ID specified on + the request matches that associated with the blob. Note that the lease may + be renewed even if it has expired as long as the blob has not been modified + or leased again since the expiration of that lease. When you renew a lease, + the lease duration clock resets. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str lease_id: + Lease ID for active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). 
Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: the lease ID of the renewed lease. + :return: str + ''' + _validate_not_none('lease_id', lease_id) + + lease = self._lease_blob_impl(container_name, + blob_name, + _LeaseActions.Renew, + lease_id, + None, # lease_duration + None, # lease_break_period + None, # proposed_lease_id + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + return lease['id'] + + def release_blob_lease(self, container_name, blob_name, + lease_id, if_modified_since=None, + if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Releases the lease. The lease may be released if the lease ID specified on the + request matches that associated with the blob. Releasing the lease allows another + client to immediately acquire the lease for the blob as soon as the release is complete. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str lease_id: + Lease ID for active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + ''' + _validate_not_none('lease_id', lease_id) + + self._lease_blob_impl(container_name, + blob_name, + _LeaseActions.Release, + lease_id, + None, # lease_duration + None, # lease_break_period + None, # proposed_lease_id + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + + def break_blob_lease(self, container_name, blob_name, + lease_break_period=None, + if_modified_since=None, + if_unmodified_since=None, + if_match=None, + if_none_match=None, timeout=None): + ''' + Breaks the lease, if the blob has an active lease. Once a lease is broken, + it cannot be renewed. Any authorized request can break the lease; the request + is not required to specify a matching lease ID. When a lease is broken, + the lease break period is allowed to elapse, during which time no lease operation + except break and release can be performed on the blob. When a lease is successfully + broken, the response indicates the interval in seconds until a new lease can be acquired. + + A lease that has been broken can also be released, in which case another client may + immediately acquire the lease on the blob. 
+ + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param int lease_break_period: + For a break operation, this is the proposed duration of + seconds that the lease should continue before it is broken, between + 0 and 60 seconds. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining + on the lease is used. A new lease will not be available before the + break period has expired, but the lease may be held for longer than + the break period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. 
+ :param int timeout: + The timeout parameter is expressed in seconds. + :return: Approximate time remaining in the lease period, in seconds. + :return: int + ''' + if (lease_break_period is not None) and (lease_break_period < 0 or lease_break_period > 60): + raise ValueError(_ERROR_INVALID_LEASE_BREAK_PERIOD) + + lease = self._lease_blob_impl(container_name, + blob_name, + _LeaseActions.Break, + None, # lease_id + None, # lease_duration + lease_break_period, + None, # proposed_lease_id + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + return lease['time'] + + def change_blob_lease(self, container_name, blob_name, + lease_id, + proposed_lease_id, + if_modified_since=None, + if_unmodified_since=None, + if_match=None, + if_none_match=None, timeout=None): + ''' + Changes the lease ID of an active lease. A change must include the current + lease ID and a new lease ID. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str lease_id: + Required if the blob has an active lease. + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + ''' + self._lease_blob_impl(container_name, + blob_name, + _LeaseActions.Change, + lease_id, + None, # lease_duration + None, # lease_break_period + proposed_lease_id, + if_modified_since, + if_unmodified_since, + if_match, + if_none_match, + timeout) + + def snapshot_blob(self, container_name, blob_name, + metadata=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, + if_none_match=None, lease_id=None, timeout=None): + ''' + Creates a read-only snapshot of a blob. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param metadata: + Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the + base blob metadata to the snapshot. If one or more name-value pairs + are specified, the snapshot is created with the specified metadata, + and metadata is not copied from the base blob. + :type metadata: dict(str, str) + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param str lease_id: + Required if the blob has an active lease. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: snapshot properties + :rtype: :class:`~azure.storage.blob.models.Blob` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'snapshot', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + 'x-ms-lease-id': _to_str(lease_id) + } + _add_metadata_headers(metadata, request) + + return self._perform_request(request, _parse_snapshot_blob, [blob_name]) + + def copy_blob(self, container_name, blob_name, copy_source, + metadata=None, + source_if_modified_since=None, + source_if_unmodified_since=None, + source_if_match=None, source_if_none_match=None, + destination_if_modified_since=None, + destination_if_unmodified_since=None, + destination_if_match=None, + destination_if_none_match=None, + destination_lease_id=None, + source_lease_id=None, timeout=None): + ''' + Copies a blob asynchronously. This operation returns a copy operation + properties object, including a copy ID you can use to check or abort the + copy operation. The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation may be a block blob, an append blob, + or a page blob. If the destination blob already exists, it must be of the + same blob type as the source blob. Any existing destination blob will be + overwritten. The destination blob cannot be modified while a copy operation + is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. 
Then + the source page ranges are enumerated, and non-empty ranges are copied. + + For a block blob or an append blob, the Blob service creates a committed + blob of zero length before returning from this operation. When copying + from a block blob, all committed blocks and their block IDs are copied. + Uncommitted blocks are not copied. At the end of the copy operation, the + destination blob will have the same committed block count as the source. + + When copying from an append blob, all committed blocks are copied. At the + end of the copy operation, the destination blob will have the same committed + block count as the source. + + For all blob types, you can call get_blob_properties on the destination + blob to check the status of the copy operation. The final blob will be + committed when the copy completes. + + :param str container_name: + Name of the destination container. The container must exist. + :param str blob_name: + Name of the destination blob. If the destination blob exists, it will + be overwritten. Otherwise, it will be created. + :param str copy_source: + A URL of up to 2 KB in length that specifies an Azure file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :param metadata: + Name-value pairs associated with the blob as metadata. If no name-value + pairs are specified, the operation will copy the metadata from the + source blob or file to the destination blob. 
If one or more name-value + pairs are specified, the destination blob is created with the specified + metadata, and metadata is not copied from the source blob or file. + :type metadata: dict(str, str) + :param datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :param datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :param ETag source_if_match: + An ETag value, or the wildcard character (*). Specify this conditional + header to copy the source blob only if its ETag matches the value + specified. If the ETag values do not match, the Blob service returns + status code 412 (Precondition Failed). This header cannot be specified + if the source is an Azure File. + :param ETag source_if_none_match: + An ETag value, or the wildcard character (*). Specify this conditional + header to copy the blob only if its ETag does not match the value + specified. If the values are identical, the Blob service returns status + code 412 (Precondition Failed). This header cannot be specified if the + source is an Azure File. + :param datetime destination_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :param datetime destination_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :param ETag destination_if_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + matches the ETag value for an existing destination blob. If the ETag for + the destination blob does not match the ETag specified for If-Match, the + Blob service returns status code 412 (Precondition Failed). + :param ETag destination_if_none_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + does not match the ETag value for the destination blob. Specify the wildcard + character (*) to perform the operation only if the destination blob does not + exist. If the specified condition isn't met, the Blob service returns status + code 412 (Precondition Failed). + :param str destination_lease_id: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). 
+ :param str source_lease_id: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: Copy operation properties such as status, source, and ID. + :rtype: :class:`~azure.storage.blob.models.CopyProperties` + ''' + return self._copy_blob(container_name, blob_name, copy_source, + metadata, + None, + source_if_modified_since, source_if_unmodified_since, + source_if_match, source_if_none_match, + destination_if_modified_since, + destination_if_unmodified_since, + destination_if_match, + destination_if_none_match, + destination_lease_id, + source_lease_id, timeout, + False) + + def _copy_blob(self, container_name, blob_name, copy_source, + metadata=None, + premium_page_blob_tier=None, + source_if_modified_since=None, + source_if_unmodified_since=None, + source_if_match=None, source_if_none_match=None, + destination_if_modified_since=None, + destination_if_unmodified_since=None, + destination_if_match=None, + destination_if_none_match=None, + destination_lease_id=None, + source_lease_id=None, timeout=None, + incremental_copy=False): + ''' + See copy_blob for more details. This helper method + allows for standard copies as well as incremental copies which are only supported for page blobs. + :param bool incremental_copy: + The timeout parameter is expressed in seconds. 
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('copy_source', copy_source) + + if copy_source.startswith('/'): + # Backwards compatibility for earlier versions of the SDK where + # the copy source can be in the following formats: + # - Blob in named container: + # /accountName/containerName/blobName + # - Snapshot in named container: + # /accountName/containerName/blobName?snapshot= + # - Blob in root container: + # /accountName/blobName + # - Snapshot in root container: + # /accountName/blobName?snapshot= + account, _, source = \ + copy_source.partition('/')[2].partition('/') + copy_source = self.protocol + '://' + \ + self.primary_endpoint + '/' + source + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + + if incremental_copy: + request.query = { + 'comp': 'incrementalcopy', + 'timeout': _int_to_str(timeout), + } + else: + request.query = {'timeout': _int_to_str(timeout)} + + request.headers = { + 'x-ms-copy-source': _to_str(copy_source), + 'x-ms-source-if-modified-since': _to_str(source_if_modified_since), + 'x-ms-source-if-unmodified-since': _to_str(source_if_unmodified_since), + 'x-ms-source-if-match': _to_str(source_if_match), + 'x-ms-source-if-none-match': _to_str(source_if_none_match), + 'If-Modified-Since': _datetime_to_utc_string(destination_if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(destination_if_unmodified_since), + 'If-Match': _to_str(destination_if_match), + 'If-None-Match': _to_str(destination_if_none_match), + 'x-ms-lease-id': _to_str(destination_lease_id), + 'x-ms-source-lease-id': _to_str(source_lease_id), + 'x-ms-access-tier': _to_str(premium_page_blob_tier) + } + _add_metadata_headers(metadata, request) + + return self._perform_request(request, _parse_properties, [BlobProperties]).copy + + def abort_copy_blob(self, container_name, 
blob_name, copy_id, + lease_id=None, timeout=None): + ''' + Aborts a pending copy_blob operation, and leaves a destination blob + with zero length and full metadata. + + :param str container_name: + Name of destination container. + :param str blob_name: + Name of destination blob. + :param str copy_id: + Copy identifier provided in the copy.id of the original + copy_blob operation. + :param str lease_id: + Required if the destination blob has an active infinite lease. + :param int timeout: + The timeout parameter is expressed in seconds. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('copy_id', copy_id) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'copy', + 'copyid': _to_str(copy_id), + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-copy-action': 'abort', + } + + self._perform_request(request) + + def delete_blob(self, container_name, blob_name, snapshot=None, + lease_id=None, delete_snapshots=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + ''' + Marks the specified blob or snapshot for deletion. + The blob is later deleted during garbage collection. + + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the Delete + Blob operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot + and retains the blob or snapshot for specified number of days. + After specified number of days, blob's data is removed from the service during garbage collection. + Soft deleted blob or snapshot is accessible through List Blobs API specifying include=Include.Deleted option. 
+ Soft-deleted blob or snapshot can be restored using Undelete API. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to delete. + :param str lease_id: + Required if the blob has an active lease. + :param ~azure.storage.blob.models.DeleteSnapshot delete_snapshots: + Required if the blob has associated snapshots. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'DELETE' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-delete-snapshots': _to_str(delete_snapshots), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + request.query = { + 'snapshot': _to_str(snapshot), + 'timeout': _int_to_str(timeout) + } + + self._perform_request(request) + + def undelete_blob(self, container_name, blob_name, timeout=None): + ''' + The undelete Blob operation restores the contents and metadata of soft deleted blob or snapshot. + Attempting to undelete a blob or snapshot that is not soft deleted will succeed without any changes. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'undelete', + 'timeout': _int_to_str(timeout) + } + + self._perform_request(request) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py new file mode 100644 index 000000000000..abd693974656 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py @@ -0,0 +1,1063 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from io import ( + BytesIO +) +from os import ( + path, +) + +from ..common._common_conversion import ( + _encode_base64, + _to_str, + _int_to_str, + _datetime_to_utc_string, + _get_content_md5, +) +from ..common._constants import ( + SERVICE_HOST_BASE, + DEFAULT_PROTOCOL, +) +from ..common._error import ( + _validate_not_none, + _validate_type_bytes, + _validate_encryption_required, + _validate_encryption_unsupported, + _ERROR_VALUE_NEGATIVE, + _ERROR_VALUE_SHOULD_BE_STREAM +) +from ..common._http import HTTPRequest +from ..common._serialization import ( + _get_request_body, + _get_data_bytes_only, + _get_data_bytes_or_stream_only, + _add_metadata_headers, +) +from ..common._serialization import ( + _len_plus +) + +from ._deserialization import ( + _convert_xml_to_block_list, + _parse_base_properties, +) +from ._encryption import ( + _encrypt_blob, + _generate_blob_encryption_data, +) +from ._serialization import ( + _convert_block_list_to_xml, + _get_path, +) +from ._upload_chunking import ( + _BlockBlobChunkUploader, + _upload_blob_chunks, + _upload_blob_substream_blocks, +) +from .baseblobservice import BaseBlobService +from .models import ( + _BlobTypes, +) + + +class BlockBlobService(BaseBlobService): + ''' + Block blobs let you upload large blobs efficiently. Block blobs are comprised + of blocks, each of which is identified by a block ID. You create or modify a + block blob by writing a set of blocks and committing them by their block IDs. + Each block can be a different size, up to a maximum of 100 MB, and a block blob + can include up to 50,000 blocks. The maximum size of a block blob is therefore + approximately 4.75 TB (100 MB X 50,000 blocks). If you are writing a block + blob that is no more than 64 MB in size, you can upload it in its entirety with + a single write operation; see create_blob_from_bytes. 
+ + :ivar int MAX_SINGLE_PUT_SIZE: + The largest size upload supported in a single put call. This is used by + the create_blob_from_* methods if the content length is known and is less + than this value. + :ivar int MAX_BLOCK_SIZE: + The size of the blocks put by create_blob_from_* methods if the content + length is unknown or is larger than MAX_SINGLE_PUT_SIZE. Smaller blocks + may be put. The maximum block size the service supports is 100MB. + :ivar int MIN_LARGE_BLOCK_UPLOAD_THRESHOLD: + The minimum block size at which the the memory-optimized, block upload + algorithm is considered. This algorithm is only applicable to the create_blob_from_file and + create_blob_from_stream methods and will prevent the full buffering of blocks. + In addition to the block size, ContentMD5 validation and Encryption must be disabled as + these options require the blocks to be buffered. + ''' + + MAX_SINGLE_PUT_SIZE = 64 * 1024 * 1024 + MAX_BLOCK_SIZE = 4 * 1024 * 1024 + MIN_LARGE_BLOCK_UPLOAD_THRESHOLD = 4 * 1024 * 1024 + 1 + + def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, + protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, + request_session=None, connection_string=None, socket_timeout=None, token_credential=None): + ''' + :param str account_name: + The storage account name. This is used to authenticate requests + signed with an account key and to construct the storage endpoint. It + is required unless a connection string is given, or if a custom + domain is used with anonymous authentication. + :param str account_key: + The storage account key. This is used for shared key authentication. + If neither account key or sas token is specified, anonymous access + will be used. + :param str sas_token: + A shared access signature token to use to authenticate requests + instead of the account key. If account key and sas token are both + specified, account key will be used to sign. 
If neither are + specified, anonymous access will be used. + :param bool is_emulated: + Whether to use the emulator. Defaults to False. If specified, will + override all other parameters besides connection string and request + session. + :param str protocol: + The protocol to use for requests. Defaults to https. + :param str endpoint_suffix: + The host base component of the url, minus the account name. Defaults + to Azure (core.windows.net). Override this to use the China cloud + (core.chinacloudapi.cn). + :param str custom_domain: + The custom domain to use. This can be set in the Azure Portal. For + example, 'www.mydomain.com'. + :param requests.Session request_session: + The session object to use for http requests. + :param str connection_string: + If specified, this will override all other parameters besides + request session. See + http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ + for the connection string format. + :param int socket_timeout: + If specified, this will override the default socket timeout. The timeout specified is in seconds. + See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. + :param token_credential: + A token credential used to authenticate HTTPS requests. The token value + should be updated before its expiration. + :type `~azure.storage.common.TokenCredential` + ''' + self.blob_type = _BlobTypes.BlockBlob + super(BlockBlobService, self).__init__( + account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, + custom_domain, request_session, connection_string, socket_timeout, token_credential) + + def put_block(self, container_name, blob_name, block, block_id, + validate_content=False, lease_id=None, timeout=None): + ''' + Creates a new block to be committed as part of a blob. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob. + :param block: Content of the block. 
+        :type block: io.IOBase or bytes
+            The content of the block, as bytes or a readable stream.
You can do this by specifying whether to commit a block from + the committed block list or from the uncommitted block list, or to commit + the most recently uploaded version of the block, whichever list it may + belong to. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param block_list: + A list of :class:`~azure.storeage.blob.models.BlobBlock` containing the block ids and block state. + :type block_list: list(:class:`~azure.storage.blob.models.BlobBlock`) + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set properties on the blob. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash of the block list content. The storage + service checks the hash of the block list content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this check is associated with + the block list content, and not with the content of the blob itself. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: ETag and last modified properties for the updated Block Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + return self._put_block_list( + container_name, + blob_name, + block_list, + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + lease_id=lease_id, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout + ) + + def get_block_list(self, container_name, blob_name, snapshot=None, + block_list_type=None, lease_id=None, timeout=None): + ''' + Retrieves the list of blocks that have been uploaded as part of a + block blob. There are two block lists maintained for a blob: + Committed Block List: + The list of blocks that have been successfully committed to a + given blob with Put Block List. + Uncommitted Block List: + The list of blocks that have been uploaded for a blob using + Put Block, but that have not yet been committed. These blocks + are stored in Azure in association with a blob, but do not yet + form part of the blob. + + :param str container_name: + Name of existing container. 
+ :param str blob_name: + Name of existing blob. + :param str snapshot: + Datetime to determine the time to retrieve the blocks. + :param str block_list_type: + Specifies whether to return the list of committed blocks, the list + of uncommitted blocks, or both lists together. Valid values are: + committed, uncommitted, or all. + :param str lease_id: + Required if the blob has an active lease. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: list committed and/or uncommitted blocks for Block Blob + :rtype: :class:`~azure.storage.blob.models.BlobBlockList` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'blocklist', + 'snapshot': _to_str(snapshot), + 'blocklisttype': _to_str(block_list_type), + 'timeout': _int_to_str(timeout), + } + request.headers = {'x-ms-lease-id': _to_str(lease_id)} + + return self._perform_request(request, _convert_xml_to_block_list) + + def put_block_from_url(self, container_name, blob_name, copy_source_url, source_range_start, source_range_end, + block_id, source_content_md5=None, lease_id=None, timeout=None): + """ + Creates a new block to be committed as part of a blob. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob. + :param str copy_source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int source_range_start: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + :param int source_range_end: + This indicates the end of the range of bytes(inclusive) that has to be taken from the copy source. 
+ :param str block_id: + A valid Base64 string value that identifies the block. Prior to + encoding, the string must be less than or equal to 64 bytes in size. + For a given blob, the length of the value specified for the blockid + parameter must be the same size for each block. Note that the Base64 + string must be URL-encoded. + :param str source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :param str lease_id: + Required if the blob has an active lease. + :param int timeout: + The timeout parameter is expressed in seconds. + """ + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('copy_source_url', copy_source_url) + _validate_not_none('source_range_start', source_range_start) + _validate_not_none('source_range_end', source_range_end) + _validate_not_none('block_id', block_id) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'block', + 'blockid': _encode_base64(_to_str(block_id)), + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-copy-source': copy_source_url, + 'x-ms-source-range': 'bytes=' + _to_str(source_range_start) + '-' + _to_str(source_range_end), + 'x-ms-source-content-md5': source_content_md5, + } + + self._perform_request(request) + + # ----Convenience APIs----------------------------------------------------- + + def create_blob_from_path( + self, container_name, blob_name, file_path, content_settings=None, + metadata=None, validate_content=False, progress_callback=None, + max_connections=2, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): + ''' + Creates a new 
blob from a file path, or updates the content of an + existing blob, with automatic chunking and progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param str file_path: + Path of the file to upload as the blob content. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param int max_connections: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. 
+ :return: ETag and last modified properties for the Block Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('file_path', file_path) + + count = path.getsize(file_path) + with open(file_path, 'rb') as stream: + return self.create_blob_from_stream( + container_name=container_name, + blob_name=blob_name, + stream=stream, + count=count, + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + lease_id=lease_id, + progress_callback=progress_callback, + max_connections=max_connections, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout) + + def create_blob_from_stream( + self, container_name, blob_name, stream, count=None, + content_settings=None, metadata=None, validate_content=False, + progress_callback=None, max_connections=2, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None, use_byte_buffer=False): + ''' + Creates a new blob from a file/stream, or updates the content of + an existing blob, with automatic chunking and progress + notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param io.IOBase stream: + Opened file/stream to upload as the blob content. + :param int count: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. 
The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param int max_connections: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. Note that parallel upload requires the stream to be seekable. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. 
+ :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :param bool use_byte_buffer: + If True, this will force usage of the original full block buffering upload path. + By default, this value is False and will employ a memory-efficient, + streaming upload algorithm under the following conditions: + The provided stream is seekable, 'require_encryption' is False, and + MAX_BLOCK_SIZE >= MIN_LARGE_BLOCK_UPLOAD_THRESHOLD. + One should consider the drawbacks of using this approach. In order to achieve + memory-efficiency, a IOBase stream or file-like object is segmented into logical blocks + using a SubStream wrapper. In order to read the correct data, each SubStream must acquire + a lock so that it can safely seek to the right position on the shared, underlying stream. + If max_connections > 1, the concurrency will result in a considerable amount of seeking on + the underlying stream. For the most common inputs such as a file-like stream object, seeking + is an inexpensive operation and this is not much of a concern. However, for other variants of streams + this may not be the case. The trade-off for memory-efficiency must be weighed against the cost of seeking + with your input stream. + The SubStream class will attempt to buffer up to 4 MB internally to reduce the amount of + seek and read calls to the underlying stream. This is particularly beneficial when uploading larger blocks. 
+ :return: ETag and last modified properties for the Block Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('stream', stream) + _validate_encryption_required(self.require_encryption, self.key_encryption_key) + + # Adjust count to include padding if we are expected to encrypt. + adjusted_count = count + if (self.key_encryption_key is not None) and (adjusted_count is not None): + adjusted_count += (16 - (count % 16)) + + # Do single put if the size is smaller than MAX_SINGLE_PUT_SIZE + if adjusted_count is not None and (adjusted_count < self.MAX_SINGLE_PUT_SIZE): + if progress_callback: + progress_callback(0, count) + + data = stream.read(count) + resp = self._put_blob( + container_name=container_name, + blob_name=blob_name, + blob=data, + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + lease_id=lease_id, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout) + + if progress_callback: + progress_callback(count, count) + + return resp + else: # Size is larger than MAX_SINGLE_PUT_SIZE, must upload with multiple put_block calls + cek, iv, encryption_data = None, None, None + + use_original_upload_path = use_byte_buffer or validate_content or self.require_encryption or \ + self.MAX_BLOCK_SIZE < self.MIN_LARGE_BLOCK_UPLOAD_THRESHOLD or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + if self.key_encryption_key: + cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) + + block_ids = _upload_blob_chunks( + blob_service=self, + container_name=container_name, + blob_name=blob_name, + blob_size=count, + block_size=self.MAX_BLOCK_SIZE, + stream=stream, + 
max_connections=max_connections, + progress_callback=progress_callback, + validate_content=validate_content, + lease_id=lease_id, + uploader_class=_BlockBlobChunkUploader, + timeout=timeout, + content_encryption_key=cek, + initialization_vector=iv + ) + else: + block_ids = _upload_blob_substream_blocks( + blob_service=self, + container_name=container_name, + blob_name=blob_name, + blob_size=count, + block_size=self.MAX_BLOCK_SIZE, + stream=stream, + max_connections=max_connections, + progress_callback=progress_callback, + validate_content=validate_content, + lease_id=lease_id, + uploader_class=_BlockBlobChunkUploader, + timeout=timeout, + ) + + return self._put_block_list( + container_name=container_name, + blob_name=blob_name, + block_list=block_ids, + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + lease_id=lease_id, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout, + encryption_data=encryption_data + ) + + def create_blob_from_bytes( + self, container_name, blob_name, blob, index=0, count=None, + content_settings=None, metadata=None, validate_content=False, + progress_callback=None, max_connections=2, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Creates a new blob from an array of bytes, or updates the content + of an existing blob, with automatic chunking and progress + notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param bytes blob: + Content of blob as an array of bytes. + :param int index: + Start index in the array of bytes. + :param int count: + Number of bytes to upload. Set to None or negative value to upload + all bytes starting from index. 
+ :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param int max_connections: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). 
Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :return: ETag and last modified properties for the Block Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('blob', blob) + _validate_not_none('index', index) + _validate_type_bytes('blob', blob) + + if index < 0: + raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) + + if count is None or count < 0: + count = len(blob) - index + + stream = BytesIO(blob) + stream.seek(index) + + return self.create_blob_from_stream( + container_name=container_name, + blob_name=blob_name, + stream=stream, + count=count, + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + progress_callback=progress_callback, + max_connections=max_connections, + lease_id=lease_id, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout, + use_byte_buffer=True + ) + + def create_blob_from_text( + self, container_name, blob_name, text, encoding='utf-8', + content_settings=None, metadata=None, validate_content=False, + progress_callback=None, max_connections=2, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + ''' + Creates a new blob 
from str/unicode, or updates the content of an + existing blob, with automatic chunking and progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param str text: + Text to upload to the blob. + :param str encoding: + Python encoding to use to convert the text to bytes. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transfered so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param int max_connections: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :return: ETag and last modified properties for the Block Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('text', text) + + if not isinstance(text, bytes): + _validate_not_none('encoding', encoding) + text = text.encode(encoding) + + return self.create_blob_from_bytes( + container_name=container_name, + blob_name=blob_name, + blob=text, + index=0, + count=len(text), + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + lease_id=lease_id, + progress_callback=progress_callback, + max_connections=max_connections, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout) + + def set_standard_blob_tier( + self, container_name, blob_name, standard_blob_tier, timeout=None): + ''' + Sets the block blob tiers on the blob. This API is only supported for block blobs on standard storage accounts. 
+ + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to update. + :param StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('standard_blob_tier', standard_blob_tier) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'tier', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-access-tier': _to_str(standard_blob_tier) + } + + self._perform_request(request) + + # -----Helper methods------------------------------------ + def _put_blob(self, container_name, blob_name, blob, content_settings=None, + metadata=None, validate_content=False, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, + timeout=None): + ''' + Creates a blob or updates an existing blob. + + See create_blob_from_* for high level + functions that handle the creation and upload of large blobs with + automatic chunking and progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param bytes blob: + Content of blob as bytes (size < 64MB). For larger size, you + must call put_block and put_block_list to set content of blob. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set properties on the blob. 
+ :param metadata: + Name-value pairs associated with the blob as metadata. + :param bool validate_content: + If true, calculates an MD5 hash of the blob content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + blob. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the new Block Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_encryption_required(self.require_encryption, self.key_encryption_key) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = {'timeout': _int_to_str(timeout)} + request.headers = { + 'x-ms-blob-type': _to_str(self.blob_type), + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + _add_metadata_headers(metadata, request) + if content_settings is not None: + request.headers.update(content_settings._to_headers()) + blob = _get_data_bytes_only('blob', blob) + if self.key_encryption_key: + encryption_data, blob = _encrypt_blob(blob, self.key_encryption_key) + request.headers['x-ms-meta-encryptiondata'] = encryption_data + request.body = blob + + if validate_content: + computed_md5 = _get_content_md5(request.body) + request.headers['Content-MD5'] = _to_str(computed_md5) + + return self._perform_request(request, _parse_base_properties) + + def _put_block(self, container_name, blob_name, block, block_id, + validate_content=False, lease_id=None, timeout=None): + ''' + See put_block for more details. This helper method + allows for encryption or other such special behavior because + it is safely handled by the library. These behaviors are + prohibited in the public version of this function. 
+ ''' + + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('block', block) + _validate_not_none('block_id', block_id) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'block', + 'blockid': _encode_base64(_to_str(block_id)), + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id) + } + request.body = _get_data_bytes_or_stream_only('block', block) + if hasattr(request.body, 'read'): + if _len_plus(request.body) is None: + try: + data = b'' + for chunk in iter(lambda: request.body.read(4096), b""): + data += chunk + request.body = data + except AttributeError: + raise ValueError(_ERROR_VALUE_SHOULD_BE_STREAM.format('request.body')) + + if validate_content: + computed_md5 = _get_content_md5(request.body) + request.headers['Content-MD5'] = _to_str(computed_md5) + + self._perform_request(request) + + def _put_block_list( + self, container_name, blob_name, block_list, content_settings=None, + metadata=None, validate_content=False, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, + timeout=None, encryption_data=None): + ''' + See put_block_list for more details. This helper method + allows for encryption or other such special behavior because + it is safely handled by the library. These behaviors are + prohibited in the public version of this function. + :param str encryption_data: + A JSON formatted string containing the encryption metadata generated for this + blob if it was encrypted all at once upon upload. This should only be passed + in by internal methods. 
+ ''' + + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('block_list', block_list) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'blocklist', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + _add_metadata_headers(metadata, request) + if content_settings is not None: + request.headers.update(content_settings._to_headers()) + request.body = _get_request_body( + _convert_block_list_to_xml(block_list)) + + if validate_content: + computed_md5 = _get_content_md5(request.body) + request.headers['Content-MD5'] = _to_str(computed_md5) + + if encryption_data is not None: + request.headers['x-ms-meta-encryptiondata'] = encryption_data + + return self._perform_request(request, _parse_base_properties) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py new file mode 100644 index 000000000000..e39067aa3ac1 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py @@ -0,0 +1,781 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ..common._common_conversion import _to_str + + +class Container(object): + ''' + Blob container class. 
+ + :ivar str name: + The name of the container. + :ivar metadata: + A dict containing name-value pairs associated with the container as metadata. + This var is set to None unless the include=metadata param was included + for the list containers operation. If this parameter was specified but the + container has no metadata, metadata will be set to an empty dictionary. + :vartype metadata: dict(str, str) + :ivar ContainerProperties properties: + System properties for the container. + ''' + + def __init__(self, name=None, props=None, metadata=None): + self.name = name + self.properties = props or ContainerProperties() + self.metadata = metadata + + +class ContainerProperties(object): + ''' + Blob container's properties class. + + :ivar datetime last_modified: + A datetime object representing the last time the container was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar LeaseProperties lease: + Stores all the lease information for the container. + :ivar bool has_immutability_policy: + Represents whether the container has an immutability policy. + :ivar bool has_legal_hold: + Represents whether the container has a legal hold. + ''' + + def __init__(self): + self.last_modified = None + self.etag = None + self.lease = LeaseProperties() + self.public_access = None + self.has_immutability_policy = None + self.has_legal_hold = None + + +class Blob(object): + ''' + Blob class. + + :ivar str name: + Name of blob. + :ivar str snapshot: + A DateTime value that uniquely identifies the snapshot. The value of + this header indicates the snapshot version, and may be used in + subsequent requests to access the snapshot. + :ivar content: + Blob content. + :vartype content: str or bytes + :ivar BlobProperties properties: + Stores all the system properties for the blob. + :ivar metadata: + Name-value pairs associated with the blob as metadata. + :ivar bool deleted: + Specify whether the blob was soft deleted. 
+ In other words, if the blob is being retained by the delete retention policy, + this field would be True. The blob could be undeleted or it will be garbage collected after the specified + time period. + ''' + + def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None, deleted=False): + self.name = name + self.snapshot = snapshot + self.content = content + self.properties = props or BlobProperties() + self.metadata = metadata + self.deleted = deleted + + +class BlobProperties(object): + ''' + Blob Properties + + :ivar str blob_type: + String indicating this blob's type. + :ivar datetime last_modified: + A datetime object representing the last time the blob was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int content_length: + The length of the content returned. If the entire blob was requested, + the length of blob in bytes. If a subset of the blob was requested, the + length of the returned subset. + :ivar str content_range: + Indicates the range of bytes returned in the event that the client + requested a subset of the blob. + :ivar int append_blob_committed_block_count: + (For Append Blobs) Number of committed blocks in the blob. + :ivar int page_blob_sequence_number: + (For Page Blobs) Sequence number for page blob used for coordinating + concurrent writes. + :ivar bool server_encrypted: + Set to true if the blob is encrypted on the server. + :ivar ~azure.storage.blob.models.CopyProperties copy: + Stores all the copy properties for the blob. + :ivar ~azure.storage.blob.models.ContentSettings content_settings: + Stores all the content settings for the blob. + :ivar ~azure.storage.blob.models.LeaseProperties lease: + Stores all the lease information for the blob. + :ivar StandardBlobTier blob_tier: + Indicates the access tier of the blob. The hot tier is optimized + for storing data that is accessed frequently. 
The cool storage tier + is optimized for storing data that is infrequently accessed and stored + for at least a month. The archive tier is optimized for storing + data that is rarely accessed and stored for at least six months + with flexible latency requirements. + :ivar datetime blob_tier_change_time: + Indicates when the access tier was last changed. + :ivar bool blob_tier_inferred: + Indicates whether the access tier was inferred by the service. + If false, it indicates that the tier was set explicitly. + :ivar datetime deleted_time: + A datetime object representing the time at which the blob was deleted. + :ivar int remaining_retention_days: + The number of days that the blob will be retained before being permanently deleted by the service. + :ivar datetime creation_time: + Indicates when the blob was created, in UTC. + ''' + + def __init__(self): + self.blob_type = None + self.last_modified = None + self.etag = None + self.content_length = None + self.content_range = None + self.append_blob_committed_block_count = None + self.page_blob_sequence_number = None + self.server_encrypted = None + self.copy = CopyProperties() + self.content_settings = ContentSettings() + self.lease = LeaseProperties() + self.blob_tier = None + self.blob_tier_change_time = None + self.blob_tier_inferred = False + self.deleted_time = None + self.remaining_retention_days = None + self.creation_time = None + + +class ContentSettings(object): + ''' + Used to store the content settings of a blob. + + :ivar str content_type: + The content type specified for the blob. If no content type was + specified, the default content type is application/octet-stream. + :ivar str content_encoding: + If the content_encoding has previously been set + for the blob, that value is stored. + :ivar str content_language: + If the content_language has previously been set + for the blob, that value is stored. 
+ :ivar str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the blob, that value is stored. + :ivar str cache_control: + If the cache_control has previously been set for + the blob, that value is stored. + :ivar str content_md5: + If the content_md5 has been set for the blob, this response + header is stored so that the client can check for message content + integrity. + ''' + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None): + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_disposition = content_disposition + self.cache_control = cache_control + self.content_md5 = content_md5 + + def _to_headers(self): + return { + 'x-ms-blob-cache-control': _to_str(self.cache_control), + 'x-ms-blob-content-type': _to_str(self.content_type), + 'x-ms-blob-content-disposition': _to_str(self.content_disposition), + 'x-ms-blob-content-md5': _to_str(self.content_md5), + 'x-ms-blob-content-encoding': _to_str(self.content_encoding), + 'x-ms-blob-content-language': _to_str(self.content_language), + } + + +class CopyProperties(object): + ''' + Blob Copy Properties. + + :ivar str id: + String identifier for the last attempted Copy Blob operation where this blob + was the destination blob. This header does not appear if this blob has never + been the destination in a Copy Blob operation, or if this blob has been + modified after a concluded Copy Blob operation using Set Blob Properties, + Put Blob, or Put Block List. + :ivar str source: + URL up to 2 KB in length that specifies the source blob used in the last attempted + Copy Blob operation where this blob was the destination blob. 
This header does not + appear if this blob has never been the destination in a Copy Blob operation, or if + this blob has been modified after a concluded Copy Blob operation using + Set Blob Properties, Put Blob, or Put Block List. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. Check copy_status_description if intermittent, + non-fatal errors impede copy progress but don't cause failure. + aborted: + Copy was ended by Abort Copy Blob. + failed: + Copy failed. See copy_status_description for failure details. + :ivar str progress: + Contains the number of bytes copied and the total bytes in the source in the last + attempted Copy Blob operation where this blob was the destination blob. Can show + between 0 and Content-Length bytes copied. + :ivar datetime completion_time: + Conclusion time of the last attempted Copy Blob operation where this blob was the + destination blob. This value can specify the time of a completed, aborted, or + failed copy attempt. + :ivar str status_description: + only appears when x-ms-copy-status is failed or pending. Describes cause of fatal + or non-fatal copy operation failure. + ''' + + def __init__(self): + self.id = None + self.source = None + self.status = None + self.progress = None + self.completion_time = None + self.status_description = None + + +class LeaseProperties(object): + ''' + Blob Lease Properties. + + :ivar str status: + The lease status of the blob. + Possible values: locked|unlocked + :ivar str state: + Lease state of the blob. + Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a blob is leased, specifies whether the lease is of infinite or fixed duration. 
+ ''' + + def __init__(self): + self.status = None + self.state = None + self.duration = None + + +class BlobPrefix(object): + ''' + BlobPrefix objects may potentially returned in the blob list when + :func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is + used with a delimiter. Prefixes can be thought of as virtual blob directories. + + :ivar str name: The name of the blob prefix. + ''' + + def __init__(self): + self.name = None + + +class BlobBlockState(object): + '''Block blob block types.''' + + Committed = 'Committed' + '''Committed blocks.''' + + Latest = 'Latest' + '''Latest blocks.''' + + Uncommitted = 'Uncommitted' + '''Uncommitted blocks.''' + + +class BlobBlock(object): + ''' + BlockBlob Block class. + + :ivar str id: + Block id. + :ivar str state: + Block state. + Possible valuse: committed|uncommitted + :ivar int size: + Block size in bytes. + ''' + + def __init__(self, id=None, state=BlobBlockState.Latest): + self.id = id + self.state = state + + def _set_size(self, size): + self.size = size + + +class BlobBlockList(object): + ''' + Blob Block List class. + + :ivar committed_blocks: + List of committed blocks. + :vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`) + :ivar uncommitted_blocks: + List of uncommitted blocks. + :vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`) + ''' + + def __init__(self): + self.committed_blocks = list() + self.uncommitted_blocks = list() + + +class PageRange(object): + ''' + Page Range for page blob. + + :ivar int start: + Start of page range in bytes. + :ivar int end: + End of page range in bytes. + :ivar bool is_cleared: + Indicates if a page range is cleared or not. Only applicable + for get_page_range_diff API. + ''' + + def __init__(self, start=None, end=None, is_cleared=False): + self.start = start + self.end = end + self.is_cleared = is_cleared + + +class ResourceProperties(object): + ''' + Base response for a resource request. 
+ + :ivar str etag: + Opaque etag value that can be used to check if resource + has been modified. + :ivar datetime last_modified: + Datetime for last time resource was modified. + ''' + + def __init__(self): + self.last_modified = None + self.etag = None + + +class AppendBlockProperties(ResourceProperties): + ''' + Response for an append block request. + + :ivar int append_offset: + Position to start next append. + :ivar int committed_block_count: + Number of committed append blocks. + ''' + + def __init__(self): + super(ResourceProperties, self).__init__() + self.append_offset = None + self.committed_block_count = None + + +class PageBlobProperties(ResourceProperties): + ''' + Response for a page request. + + :ivar int sequence_number: + Identifer for page blobs to help handle concurrent writes. + ''' + + def __init__(self): + super(ResourceProperties, self).__init__() + self.sequence_number = None + + +class PublicAccess(object): + ''' + Specifies whether data in the container may be accessed publicly and the level of access. + ''' + + OFF = 'off' + ''' + Specifies that there is no public read access for both the container and blobs within the container. + Clients cannot enumerate the containers within the storage account as well as the blobs within the container. + ''' + + Blob = 'blob' + ''' + Specifies public read access for blobs. Blob data within this container can be read + via anonymous request, but container data is not available. Clients cannot enumerate + blobs within the container via anonymous request. + ''' + + Container = 'container' + ''' + Specifies full public read access for container and blob data. Clients can enumerate + blobs within the container via anonymous request, but cannot enumerate containers + within the storage account. + ''' + + +class DeleteSnapshot(object): + ''' + Required if the blob has associated snapshots. Specifies how to handle the snapshots. 
+ ''' + + Include = 'include' + ''' + Delete the base blob and all of its snapshots. + ''' + + Only = 'only' + ''' + Delete only the blob's snapshots and not the blob itself. + ''' + + +class BlockListType(object): + ''' + Specifies whether to return the list of committed blocks, the list of uncommitted + blocks, or both lists together. + ''' + + All = 'all' + '''Both committed and uncommitted blocks.''' + + Committed = 'committed' + '''Committed blocks.''' + + Uncommitted = 'uncommitted' + '''Uncommitted blocks.''' + + +class SequenceNumberAction(object): + '''Sequence number actions.''' + + Increment = 'increment' + ''' + Increments the value of the sequence number by 1. If specifying this option, + do not include the x-ms-blob-sequence-number header. + ''' + + Max = 'max' + ''' + Sets the sequence number to be the higher of the value included with the + request and the value currently stored for the blob. + ''' + + Update = 'update' + '''Sets the sequence number to the value included with the request.''' + + +class _LeaseActions(object): + '''Actions for a lease.''' + + Acquire = 'acquire' + '''Acquire the lease.''' + + Break = 'break' + '''Break the lease.''' + + Change = 'change' + '''Change the lease ID.''' + + Release = 'release' + '''Release the lease.''' + + Renew = 'renew' + '''Renew the lease.''' + + +class _BlobTypes(object): + '''Blob type options.''' + + AppendBlob = 'AppendBlob' + '''Append blob type.''' + + BlockBlob = 'BlockBlob' + '''Block blob type.''' + + PageBlob = 'PageBlob' + '''Page blob type.''' + + +class Include(object): + ''' + Specifies the datasets to include in the blob list response. + + :ivar ~azure.storage.blob.models.Include Include.COPY: + Specifies that metadata related to any current or previous Copy Blob operation + should be included in the response. + :ivar ~azure.storage.blob.models.Include Include.METADATA: + Specifies that metadata be returned in the response. 
+ :ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS: + Specifies that snapshots should be included in the enumeration. + :ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS: + Specifies that blobs for which blocks have been uploaded, but which have not + been committed using Put Block List, be included in the response. + :ivar ~azure.storage.blob.models.Include Include.DELETED: + Specifies that deleted blobs should be returned in the response. + ''' + + def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False, + copy=False, deleted=False, _str=None): + ''' + :param bool snapshots: + Specifies that snapshots should be included in the enumeration. + :param bool metadata: + Specifies that metadata be returned in the response. + :param bool uncommitted_blobs: + Specifies that blobs for which blocks have been uploaded, but which have + not been committed using Put Block List, be included in the response. + :param bool copy: + Specifies that metadata related to any current or previous Copy Blob + operation should be included in the response. + :param bool deleted: + Specifies that deleted blobs should be returned in the response. + :param str _str: + A string representing the includes. 
+ ''' + if not _str: + _str = '' + components = _str.split(',') + self.snapshots = snapshots or ('snapshots' in components) + self.metadata = metadata or ('metadata' in components) + self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components) + self.copy = copy or ('copy' in components) + self.deleted = deleted or ('deleted' in components) + + def __or__(self, other): + return Include(_str=str(self) + str(other)) + + def __add__(self, other): + return Include(_str=str(self) + str(other)) + + def __str__(self): + include = (('snapshots,' if self.snapshots else '') + + ('metadata,' if self.metadata else '') + + ('uncommittedblobs,' if self.uncommitted_blobs else '') + + ('copy,' if self.copy else '') + + ('deleted,' if self.deleted else '')) + return include.rstrip(',') + + +Include.COPY = Include(copy=True) +Include.METADATA = Include(metadata=True) +Include.SNAPSHOTS = Include(snapshots=True) +Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True) +Include.DELETED = Include(deleted=True) + + +class BlobPermissions(object): + ''' + BlobPermissions class to be used with + :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API. + + :ivar BlobPermissions BlobPermissions.ADD: + Add a block to an append blob. + :ivar BlobPermissions BlobPermissions.CREATE: + Write a new blob, snapshot a blob, or copy a blob to a new blob. + :ivar BlobPermissions BlobPermissions.DELETE: + Delete the blob. + :ivar BlobPermissions BlobPermissions.READ: + Read the content, properties, metadata and block list. Use the blob as the source of a copy operation. + :ivar BlobPermissions BlobPermissions.WRITE: + Create or write content, properties, metadata, or block list. Snapshot or lease + the blob. Resize the blob (page blob only). Use the blob as the destination of a + copy operation within the same account. 
+ ''' + + def __init__(self, read=False, add=False, create=False, write=False, + delete=False, _str=None): + ''' + :param bool read: + Read the content, properties, metadata and block list. Use the blob as + the source of a copy operation. + :param bool add: + Add a block to an append blob. + :param bool create: + Write a new blob, snapshot a blob, or copy a blob to a new blob. + :param bool write: + Create or write content, properties, metadata, or block list. Snapshot + or lease the blob. Resize the blob (page blob only). Use the blob as the + destination of a copy operation within the same account. + :param bool delete: + Delete the blob. + :param str _str: + A string representing the permissions. + ''' + if not _str: + _str = '' + self.read = read or ('r' in _str) + self.add = add or ('a' in _str) + self.create = create or ('c' in _str) + self.write = write or ('w' in _str) + self.delete = delete or ('d' in _str) + + def __or__(self, other): + return BlobPermissions(_str=str(self) + str(other)) + + def __add__(self, other): + return BlobPermissions(_str=str(self) + str(other)) + + def __str__(self): + return (('r' if self.read else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('w' if self.write else '') + + ('d' if self.delete else '')) + + +BlobPermissions.ADD = BlobPermissions(add=True) +BlobPermissions.CREATE = BlobPermissions(create=True) +BlobPermissions.DELETE = BlobPermissions(delete=True) +BlobPermissions.READ = BlobPermissions(read=True) +BlobPermissions.WRITE = BlobPermissions(write=True) + + +class ContainerPermissions(object): + ''' + ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature` + API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`. + + :ivar ContainerPermissions ContainerPermissions.DELETE: + Delete any blob in the container. 
Note: You cannot grant permissions to + delete a container with a container SAS. Use an account SAS instead. + :ivar ContainerPermissions ContainerPermissions.LIST: + List blobs in the container. + :ivar ContainerPermissions ContainerPermissions.READ: + Read the content, properties, metadata or block list of any blob in the + container. Use any blob in the container as the source of a copy operation. + :ivar ContainerPermissions ContainerPermissions.WRITE: + For any blob in the container, create or write content, properties, + metadata, or block list. Snapshot or lease the blob. Resize the blob + (page blob only). Use the blob as the destination of a copy operation + within the same account. Note: You cannot grant permissions to read or + write container properties or metadata, nor to lease a container, with + a container SAS. Use an account SAS instead. + ''' + + def __init__(self, read=False, write=False, delete=False, list=False, + _str=None): + ''' + :param bool read: + Read the content, properties, metadata or block list of any blob in the + container. Use any blob in the container as the source of a copy operation. + :param bool write: + For any blob in the container, create or write content, properties, + metadata, or block list. Snapshot or lease the blob. Resize the blob + (page blob only). Use the blob as the destination of a copy operation + within the same account. Note: You cannot grant permissions to read or + write container properties or metadata, nor to lease a container, with + a container SAS. Use an account SAS instead. + :param bool delete: + Delete any blob in the container. Note: You cannot grant permissions to + delete a container with a container SAS. Use an account SAS instead. + :param bool list: + List blobs in the container. + :param str _str: + A string representing the permissions. 
+ ''' + if not _str: + _str = '' + self.read = read or ('r' in _str) + self.write = write or ('w' in _str) + self.delete = delete or ('d' in _str) + self.list = list or ('l' in _str) + + def __or__(self, other): + return ContainerPermissions(_str=str(self) + str(other)) + + def __add__(self, other): + return ContainerPermissions(_str=str(self) + str(other)) + + def __str__(self): + return (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('l' if self.list else '')) + + +ContainerPermissions.DELETE = ContainerPermissions(delete=True) +ContainerPermissions.LIST = ContainerPermissions(list=True) +ContainerPermissions.READ = ContainerPermissions(read=True) +ContainerPermissions.WRITE = ContainerPermissions(write=True) + + +class PremiumPageBlobTier(object): + ''' + Specifies the page blob tier to set the blob to. This is only applicable to page + blobs on premium storage accounts. + Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets + for detailed information on the corresponding IOPS and throughtput per PageBlobTier. + ''' + + P4 = 'P4' + ''' P4 Tier ''' + + P6 = 'P6' + ''' P6 Tier ''' + + P10 = 'P10' + ''' P10 Tier ''' + + P20 = 'P20' + ''' P20 Tier ''' + + P30 = 'P30' + ''' P30 Tier ''' + + P40 = 'P40' + ''' P40 Tier ''' + + P50 = 'P50' + ''' P50 Tier ''' + + P60 = 'P60' + ''' P60 Tier ''' + + +class StandardBlobTier(object): + ''' + Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts. + ''' + + Archive = 'Archive' + ''' Archive ''' + + Cool = 'Cool' + ''' Cool ''' + + Hot = 'Hot' + ''' Hot ''' + + +class AccountInformation(object): + """ + Holds information related to the storage account. + + :ivar str sku_name: + Name of the storage SKU, also known as account type. 
+ Example: Standard_LRS, Standard_ZRS, Standard_GRS, Standard_RAGRS, Premium_LRS, Premium_ZRS + :ivar str account_kind: + Describes the flavour of the storage account, also known as account kind. + Example: Storage, StorageV2, BlobStorage + """ + def __init__(self): + self.sku_name = None + self.account_kind = None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py new file mode 100644 index 000000000000..476d55a49071 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py @@ -0,0 +1,1394 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import sys +from os import path + + +from ..common._common_conversion import ( + _int_to_str, + _to_str, + _datetime_to_utc_string, + _get_content_md5, +) +from ..common._constants import ( + SERVICE_HOST_BASE, + DEFAULT_PROTOCOL, +) +from ..common._error import ( + _validate_not_none, + _validate_type_bytes, + _validate_encryption_required, + _validate_encryption_unsupported, + _ERROR_VALUE_NEGATIVE, +) +from ..common._http import HTTPRequest +from ..common._serialization import ( + _get_data_bytes_only, + _add_metadata_headers, +) + +from ._deserialization import ( + _convert_xml_to_page_ranges, + _parse_page_properties, + _parse_base_properties, +) +from ._encryption import _generate_blob_encryption_data +from ._error import ( + _ERROR_PAGE_BLOB_SIZE_ALIGNMENT, +) +from ._serialization import ( + _get_path, + _validate_and_format_range_headers, +) +from ._upload_chunking import ( + _PageBlobChunkUploader, + _upload_blob_chunks, +) +from .baseblobservice import 
BaseBlobService +from .models import ( + _BlobTypes, + ResourceProperties) + +if sys.version_info >= (3,): + from io import BytesIO +else: + from cStringIO import StringIO as BytesIO + +# Keep this value sync with _ERROR_PAGE_BLOB_SIZE_ALIGNMENT +_PAGE_ALIGNMENT = 512 + + +class PageBlobService(BaseBlobService): + ''' + Page blobs are a collection of 512-byte pages optimized for random read and + write operations. To create a page blob, you initialize the page blob and + specify the maximum size the page blob will grow. To add or update the + contents of a page blob, you write a page or pages by specifying an offset + and a range that align to 512-byte page boundaries. A write to a page blob + can overwrite just one page, some pages, or up to 4 MB of the page blob. + Writes to page blobs happen in-place and are immediately committed to the + blob. The maximum size for a page blob is 8 TB. + + :ivar int MAX_PAGE_SIZE: + The size of the pages put by create_blob_from_* methods. Smaller pages + may be put if there is less data provided. The maximum page size the service + supports is 4MB. When using the create_blob_from_* methods, empty pages are skipped. + ''' + + MAX_PAGE_SIZE = 4 * 1024 * 1024 + + def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=False, + protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE, custom_domain=None, + request_session=None, connection_string=None, socket_timeout=None, token_credential=None): + ''' + :param str account_name: + The storage account name. This is used to authenticate requests + signed with an account key and to construct the storage endpoint. It + is required unless a connection string is given, or if a custom + domain is used with anonymous authentication. + :param str account_key: + The storage account key. This is used for shared key authentication. + If neither account key or sas token is specified, anonymous access + will be used. 
+ :param str sas_token: + A shared access signature token to use to authenticate requests + instead of the account key. If account key and sas token are both + specified, account key will be used to sign. If neither are + specified, anonymous access will be used. + :param bool is_emulated: + Whether to use the emulator. Defaults to False. If specified, will + override all other parameters besides connection string and request + session. + :param str protocol: + The protocol to use for requests. Defaults to https. + :param str endpoint_suffix: + The host base component of the url, minus the account name. Defaults + to Azure (core.windows.net). Override this to use the China cloud + (core.chinacloudapi.cn). + :param str custom_domain: + The custom domain to use. This can be set in the Azure Portal. For + example, 'www.mydomain.com'. + :param requests.Session request_session: + The session object to use for http requests. + :param str connection_string: + If specified, this will override all other parameters besides + request session. See + http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ + for the connection string format. + :param int socket_timeout: + If specified, this will override the default socket timeout. The timeout specified is in seconds. + See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value. + :param token_credential: + A token credential used to authenticate HTTPS requests. The token value + should be updated before its expiration. 
+ :type `~azure.storage.common.TokenCredential` + ''' + self.blob_type = _BlobTypes.PageBlob + super(PageBlobService, self).__init__( + account_name, account_key, sas_token, is_emulated, protocol, endpoint_suffix, + custom_domain, request_session, connection_string, socket_timeout, token_credential) + + def create_blob( + self, container_name, blob_name, content_length, content_settings=None, + sequence_number=None, metadata=None, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): + ''' + Creates a new Page Blob. + + See create_blob_from_* for high level functions that handle the + creation and upload of large blobs with automatic chunking and + progress notifications. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param int content_length: + Required. This header specifies the maximum size + for the page blob, up to 1 TB. The page blob size must be aligned + to a 512-byte boundary. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set properties on the blob. + :param int sequence_number: + The sequence number is a user-controlled value that you can use to + track requests. The value of the sequence number must be between 0 + and 2^63 - 1.The default value is 0. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :param PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. 
+ :return: ETag and last modified properties for the new Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + return self._create_blob( + container_name, + blob_name, + content_length, + content_settings=content_settings, + sequence_number=sequence_number, + metadata=metadata, + lease_id=lease_id, + premium_page_blob_tier=premium_page_blob_tier, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout + ) + + def incremental_copy_blob(self, container_name, blob_name, copy_source, + metadata=None, destination_if_modified_since=None, destination_if_unmodified_since=None, + destination_if_match=None, destination_if_none_match=None, destination_lease_id=None, + source_lease_id=None, timeout=None): + ''' + Copies an incremental copy of a blob asynchronously. This operation returns a copy operation + properties object, including a copy ID you can use to check or abort the + copy operation. The Blob service copies blobs on a best-effort basis. + + The source blob for an incremental copy operation must be a page blob. + Call get_blob_properties on the destination blob to check the status of the copy operation. + The final blob will be committed when the copy completes. + + :param str container_name: + Name of the destination container. The container must exist. + :param str blob_name: + Name of the destination blob. If the destination blob exists, it will + be overwritten. Otherwise, it will be created. + :param str copy_source: + A URL of up to 2 KB in length that specifies an Azure page blob. + The value should be URL-encoded as it would appear in a request URI. + The copy source must be a snapshot and include a valid SAS token or be public. 
+ Example: + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=&sastoken + :param metadata: + Name-value pairs associated with the blob as metadata. If no name-value + pairs are specified, the operation will copy the metadata from the + source blob or file to the destination blob. If one or more name-value + pairs are specified, the destination blob is created with the specified + metadata, and metadata is not copied from the source blob or file. + :type metadata: dict(str, str). + :param datetime destination_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :param datetime destination_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the destination blob + has not been modified since the specified date/time. If the destination blob + has been modified, the Blob service returns status code 412 (Precondition Failed). + :param ETag destination_if_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + matches the ETag value for an existing destination blob. If the ETag for + the destination blob does not match the ETag specified for If-Match, the + Blob service returns status code 412 (Precondition Failed). 
+ :param ETag destination_if_none_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + does not match the ETag value for the destination blob. Specify the wildcard + character (*) to perform the operation only if the destination blob does not + exist. If the specified condition isn't met, the Blob service returns status + code 412 (Precondition Failed). + :param str destination_lease_id: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :param str source_lease_id: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: Copy operation properties such as status, source, and ID. + :rtype: :class:`~azure.storage.blob.models.CopyProperties` + ''' + return self._copy_blob(container_name, blob_name, copy_source, + metadata, + source_if_modified_since=None, source_if_unmodified_since=None, + source_if_match=None, source_if_none_match=None, + destination_if_modified_since=destination_if_modified_since, + destination_if_unmodified_since=destination_if_unmodified_since, + destination_if_match=destination_if_match, + destination_if_none_match=destination_if_none_match, + destination_lease_id=destination_lease_id, + source_lease_id=source_lease_id, timeout=timeout, + incremental_copy=True) + + def update_page( + self, container_name, blob_name, page, start_range, end_range, + validate_content=False, lease_id=None, if_sequence_number_lte=None, + if_sequence_number_lt=None, if_sequence_number_eq=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + ''' + Updates a range of pages. 
+ + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param bytes page: + Content of the page. + :param int start_range: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. + :param int end_range: + End of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. + :param bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + blob. + :param str lease_id: + Required if the blob has an active lease. + :param int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :param int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :param int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify an ETag value for this conditional + header to write the page only if the blob's ETag value matches the + value specified. If the values do not match, the Blob service fails. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify an ETag value for this conditional + header to write the page only if the blob's ETag value does not + match the value specified. If the values are identical, the Blob + service fails. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the updated Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + + return self._update_page( + container_name, + blob_name, + page, + start_range, + end_range, + validate_content=validate_content, + lease_id=lease_id, + if_sequence_number_lte=if_sequence_number_lte, + if_sequence_number_lt=if_sequence_number_lt, + if_sequence_number_eq=if_sequence_number_eq, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout + ) + + def clear_page( + self, container_name, blob_name, start_range, end_range, + lease_id=None, if_sequence_number_lte=None, + if_sequence_number_lt=None, if_sequence_number_eq=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + ''' + Clears a range of pages. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param int start_range: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. + :param int end_range: + End of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. + :param str lease_id: + Required if the blob has an active lease. + :param int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. 
+ :param int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :param int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify an ETag value for this conditional + header to write the page only if the blob's ETag value matches the + value specified. If the values do not match, the Blob service fails. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify an ETag value for this conditional + header to write the page only if the blob's ETag value does not + match the value specified. If the values are identical, the Blob + service fails. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the updated Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'page', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-page-write': 'clear', + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), + 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), + 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + _validate_and_format_range_headers( + request, + start_range, + end_range, + align_to_page=True) + + return self._perform_request(request, _parse_page_properties) + + def get_page_ranges( + self, container_name, blob_name, snapshot=None, start_range=None, + end_range=None, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): + ''' + Returns the list of valid page ranges for a Page Blob or snapshot + of a page blob. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to retrieve information + from. + :param int start_range: + Start of byte range to use for getting valid page ranges. + If no end_range is given, all bytes after the start_range will be searched. 
+ Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-, etc. + :param int end_range: + End of byte range to use for getting valid page ranges. + If end_range is given, start_range must be provided. + This range will return valid page ranges for from the offset start up to + offset end. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-, etc. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. 
+ :param int timeout: + The timeout parameter is expressed in seconds. + :return: A list of valid Page Ranges for the Page Blob. + :rtype: list(:class:`~azure.storage.blob.models.PageRange`) + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'pagelist', + 'snapshot': _to_str(snapshot), + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + if start_range is not None: + _validate_and_format_range_headers( + request, + start_range, + end_range, + start_range_required=False, + end_range_required=False, + align_to_page=True) + + return self._perform_request(request, _convert_xml_to_page_ranges) + + def get_page_ranges_diff( + self, container_name, blob_name, previous_snapshot, snapshot=None, + start_range=None, end_range=None, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None): + ''' + The response will include only the pages that are different between either a + recent snapshot or the current blob and a previous snapshot, including pages + that were cleared. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param str previous_snapshot: + The snapshot parameter is an opaque DateTime value that + specifies a previous blob snapshot to be compared + against a more recent snapshot or the current blob. 
+ :param str snapshot: + The snapshot parameter is an opaque DateTime value that + specifies a more recent blob snapshot to be compared + against a previous snapshot (previous_snapshot). + :param int start_range: + Start of byte range to use for getting different page ranges. + If no end_range is given, all bytes after the start_range will be searched. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-, etc. + :param int end_range: + End of byte range to use for getting different page ranges. + If end_range is given, start_range must be provided. + This range will return valid page ranges for from the offset start up to + offset end. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-, etc. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. 
+ :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: A list of different Page Ranges for the Page Blob. + :rtype: list(:class:`~azure.storage.blob.models.PageRange`) + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('previous_snapshot', previous_snapshot) + request = HTTPRequest() + request.method = 'GET' + request.host_locations = self._get_host_locations(secondary=True) + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'pagelist', + 'snapshot': _to_str(snapshot), + 'prevsnapshot': _to_str(previous_snapshot), + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + if start_range is not None: + _validate_and_format_range_headers( + request, + start_range, + end_range, + start_range_required=False, + end_range_required=False, + align_to_page=True) + + return self._perform_request(request, _convert_xml_to_page_ranges) + + def set_sequence_number( + self, container_name, blob_name, sequence_number_action, sequence_number=None, + lease_id=None, if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + + ''' + Sets the blob sequence number. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. 
+ :param str sequence_number_action: + This property indicates how the service should modify the blob's sequence + number. See :class:`~azure.storage.blob.models.SequenceNumberAction` for more information. + :param str sequence_number: + This property sets the blob's sequence number. The sequence number is a + user-controlled property that you can use to track requests and manage + concurrency issues. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ :return: ETag and last modified properties for the updated Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('sequence_number_action', sequence_number_action) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'properties', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-blob-sequence-number': _to_str(sequence_number), + 'x-ms-sequence-number-action': _to_str(sequence_number_action), + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + + return self._perform_request(request, _parse_page_properties) + + def resize_blob( + self, container_name, blob_name, content_length, + lease_id=None, if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + + ''' + Resizes a page blob to the specified size. If the specified value is less + than the current size of the blob, then all pages above the specified value + are cleared. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of existing blob. + :param int content_length: + Size to resize blob to. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
+ :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: ETag and last modified properties for the updated Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('content_length', content_length) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'properties', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-blob-content-length': _to_str(content_length), + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match), + } + + return self._perform_request(request, _parse_page_properties) + + # ----Convenience APIs----------------------------------------------------- + + def 
create_blob_from_path( + self, container_name, blob_name, file_path, content_settings=None, + metadata=None, validate_content=False, progress_callback=None, max_connections=2, + lease_id=None, if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None, premium_page_blob_tier=None): + ''' + Creates a new blob from a file path, or updates the content of an + existing blob, with automatic chunking and progress notifications. + Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param str file_path: + Path of the file to upload as the blob content. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash for each page of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transferred so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param int max_connections: + Maximum number of parallel connections to use. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. 
+ :return: ETag and last modified properties for the Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('file_path', file_path) + + count = path.getsize(file_path) + with open(file_path, 'rb') as stream: + return self.create_blob_from_stream( + container_name=container_name, + blob_name=blob_name, + stream=stream, + count=count, + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + progress_callback=progress_callback, + max_connections=max_connections, + lease_id=lease_id, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout, + premium_page_blob_tier=premium_page_blob_tier) + + def create_blob_from_stream( + self, container_name, blob_name, stream, count, content_settings=None, + metadata=None, validate_content=False, progress_callback=None, + max_connections=2, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, + premium_page_blob_tier=None): + ''' + Creates a new blob from a file/stream, or updates the content of an + existing blob, with automatic chunking and progress notifications. + Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param io.IOBase stream: + Opened file/stream to upload as the blob content. + :param int count: + Number of bytes to read from the stream. This is required, a page + blob cannot be created if the count is unknown. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set the blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. 
+ :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash for each page of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transferred so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param int max_connections: + Maximum number of parallel connections to use. Note that parallel upload + requires the stream to be seekable. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). 
Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :return: ETag and last modified properties for the Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('stream', stream) + _validate_not_none('count', count) + _validate_encryption_required(self.require_encryption, self.key_encryption_key) + + if count < 0: + raise ValueError(_ERROR_VALUE_NEGATIVE.format('count')) + + if count % _PAGE_ALIGNMENT != 0: + raise ValueError(_ERROR_PAGE_BLOB_SIZE_ALIGNMENT.format(count)) + + cek, iv, encryption_data = None, None, None + if self.key_encryption_key is not None: + cek, iv, encryption_data = _generate_blob_encryption_data(self.key_encryption_key) + + response = self._create_blob( + container_name=container_name, + blob_name=blob_name, + content_length=count, + content_settings=content_settings, + metadata=metadata, + lease_id=lease_id, + premium_page_blob_tier=premium_page_blob_tier, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout, + encryption_data=encryption_data + ) + + if count == 0: + return response + + # _upload_blob_chunks returns the block ids for block blobs so resource_properties + # is passed as a parameter to 
get the last_modified and etag for page and append blobs. + # this info is not needed for block_blobs since _put_block_list is called after which gets this info + resource_properties = ResourceProperties() + _upload_blob_chunks( + blob_service=self, + container_name=container_name, + blob_name=blob_name, + blob_size=count, + block_size=self.MAX_PAGE_SIZE, + stream=stream, + max_connections=max_connections, + progress_callback=progress_callback, + validate_content=validate_content, + lease_id=lease_id, + uploader_class=_PageBlobChunkUploader, + if_match=response.etag, + timeout=timeout, + content_encryption_key=cek, + initialization_vector=iv, + resource_properties=resource_properties + ) + + return resource_properties + + def create_blob_from_bytes( + self, container_name, blob_name, blob, index=0, count=None, + content_settings=None, metadata=None, validate_content=False, + progress_callback=None, max_connections=2, lease_id=None, + if_modified_since=None, if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None, premium_page_blob_tier=None): + ''' + Creates a new blob from an array of bytes, or updates the content + of an existing blob, with automatic chunking and progress + notifications. Empty chunks are skipped, while non-empty ones (even if only partly filled) are uploaded. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to create or update. + :param bytes blob: + Content of blob as an array of bytes. + :param int index: + Start index in the byte array. + :param int count: + Number of bytes to upload. Set to None or negative value to upload + all bytes starting from index. + :param ~azure.storage.blob.models.ContentSettings content_settings: + ContentSettings object used to set blob properties. + :param metadata: + Name-value pairs associated with the blob as metadata. 
+ :type metadata: dict(str, str) + :param bool validate_content: + If true, calculates an MD5 hash for each page of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. + :param progress_callback: + Callback for progress with signature function(current, total) where + current is the number of bytes transferred so far, and total is the + size of the blob, or None if the total size is unknown. + :type progress_callback: func(current, total) + :param int max_connections: + Maximum number of parallel connections to use. + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. 
Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :return: ETag and last modified properties for the Page Blob + :rtype: :class:`~azure.storage.blob.models.ResourceProperties` + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('blob', blob) + _validate_type_bytes('blob', blob) + + if index < 0: + raise IndexError(_ERROR_VALUE_NEGATIVE.format('index')) + + if count is None or count < 0: + count = len(blob) - index + + stream = BytesIO(blob) + stream.seek(index) + + return self.create_blob_from_stream( + container_name=container_name, + blob_name=blob_name, + stream=stream, + count=count, + content_settings=content_settings, + metadata=metadata, + validate_content=validate_content, + lease_id=lease_id, + progress_callback=progress_callback, + max_connections=max_connections, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_match=if_match, + if_none_match=if_none_match, + timeout=timeout, + premium_page_blob_tier=premium_page_blob_tier) + + def set_premium_page_blob_tier( + self, container_name, blob_name, premium_page_blob_tier, + timeout=None): + ''' + Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob to update. + :param PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. 
The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :param int timeout: + The timeout parameter is expressed in seconds. This method may make + multiple calls to the Azure service and the timeout will apply to + each call individually. + ''' + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('premium_page_blob_tier', premium_page_blob_tier) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'tier', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-access-tier': _to_str(premium_page_blob_tier) + } + + self._perform_request(request) + + def copy_blob(self, container_name, blob_name, copy_source, + metadata=None, + source_if_modified_since=None, + source_if_unmodified_since=None, + source_if_match=None, source_if_none_match=None, + destination_if_modified_since=None, + destination_if_unmodified_since=None, + destination_if_match=None, + destination_if_none_match=None, + destination_lease_id=None, + source_lease_id=None, timeout=None, + premium_page_blob_tier=None): + ''' + Copies a blob asynchronously. This operation returns a copy operation + properties object, including a copy ID you can use to check or abort the + copy operation. The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation must be a page blob. If the destination + blob already exists, it must be of the same blob type as the source blob. + Any existing destination blob will be overwritten. + The destination blob cannot be modified while a copy operation is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. 
Then + the source page ranges are enumerated, and non-empty ranges are copied. + + If the tier on the source blob is larger than the tier being passed to this + copy operation or if the size of the blob exceeds the tier being passed to + this copy operation then the operation will fail. + + You can call get_blob_properties on the destination + blob to check the status of the copy operation. The final blob will be + committed when the copy completes. + + :param str container_name: + Name of the destination container. The container must exist. + :param str blob_name: + Name of the destination blob. If the destination blob exists, it will + be overwritten. Otherwise, it will be created. + :param str copy_source: + A URL of up to 2 KB in length that specifies an Azure file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :param metadata: + Name-value pairs associated with the blob as metadata. If no name-value + pairs are specified, the operation will copy the metadata from the + source blob or file to the destination blob. If one or more name-value + pairs are specified, the destination blob is created with the specified + metadata, and metadata is not copied from the source blob or file. + :type metadata: dict(str, str). + :param datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :param datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :param ETag source_if_match: + An ETag value, or the wildcard character (*). Specify this conditional + header to copy the source blob only if its ETag matches the value + specified. If the ETag values do not match, the Blob service returns + status code 412 (Precondition Failed). This header cannot be specified + if the source is an Azure File. + :param ETag source_if_none_match: + An ETag value, or the wildcard character (*). Specify this conditional + header to copy the blob only if its ETag does not match the value + specified. If the values are identical, the Blob service returns status + code 412 (Precondition Failed). This header cannot be specified if the + source is an Azure File. + :param datetime destination_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :param datetime destination_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :param ETag destination_if_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + matches the ETag value for an existing destination blob. If the ETag for + the destination blob does not match the ETag specified for If-Match, the + Blob service returns status code 412 (Precondition Failed). + :param ETag destination_if_none_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + does not match the ETag value for the destination blob. Specify the wildcard + character (*) to perform the operation only if the destination blob does not + exist. If the specified condition isn't met, the Blob service returns status + code 412 (Precondition Failed). + :param str destination_lease_id: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :param str source_lease_id: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :param int timeout: + The timeout parameter is expressed in seconds. + :param PageBlobTier premium_page_blob_tier: + A page blob tier value to set on the destination blob. The tier correlates to + the size of the blob and number of allowed IOPS. This is only applicable to + page blobs on premium storage accounts. 
+ If the tier on the source blob is larger than the tier being passed to this + copy operation or if the size of the blob exceeds the tier being passed to + this copy operation then the operation will fail. + :return: Copy operation properties such as status, source, and ID. + :rtype: :class:`~azure.storage.blob.models.CopyProperties` + ''' + return self._copy_blob(container_name, blob_name, copy_source, + metadata, premium_page_blob_tier, + source_if_modified_since, source_if_unmodified_since, + source_if_match, source_if_none_match, + destination_if_modified_since, + destination_if_unmodified_since, + destination_if_match, + destination_if_none_match, + destination_lease_id, + source_lease_id, timeout, + False) + + # -----Helper methods----------------------------------------------------- + + def _create_blob( + self, container_name, blob_name, content_length, content_settings=None, + sequence_number=None, metadata=None, lease_id=None, premium_page_blob_tier=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None, + encryption_data=None): + ''' + See create_blob for more details. This helper method + allows for encryption or other such special behavior because + it is safely handled by the library. These behaviors are + prohibited in the public version of this function. + :param str encryption_data: + The JSON formatted encryption metadata to upload as a part of the blob. + This should only be passed internally from other methods and only applied + when uploading entire blob contents immediately follows creation of the blob. 
+ ''' + + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('content_length', content_length) + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = {'timeout': _int_to_str(timeout)} + request.headers = { + 'x-ms-blob-type': _to_str(self.blob_type), + 'x-ms-blob-content-length': _to_str(content_length), + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-blob-sequence-number': _to_str(sequence_number), + 'x-ms-access-tier': _to_str(premium_page_blob_tier), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + _add_metadata_headers(metadata, request) + if content_settings is not None: + request.headers.update(content_settings._to_headers()) + + if encryption_data is not None: + request.headers['x-ms-meta-encryptiondata'] = encryption_data + + return self._perform_request(request, _parse_base_properties) + + def _update_page( + self, container_name, blob_name, page, start_range, end_range, + validate_content=False, lease_id=None, if_sequence_number_lte=None, + if_sequence_number_lt=None, if_sequence_number_eq=None, + if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + ''' + See update_page for more details. This helper method + allows for encryption or other such special behavior because + it is safely handled by the library. These behaviors are + prohibited in the public version of this function. 
+ ''' + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'page', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-page-write': 'update', + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), + 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), + 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + _validate_and_format_range_headers( + request, + start_range, + end_range, + align_to_page=True) + request.body = _get_data_bytes_only('page', page) + + if validate_content: + computed_md5 = _get_content_md5(request.body) + request.headers['Content-MD5'] = _to_str(computed_md5) + + return self._perform_request(request, _parse_page_properties) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py new file mode 100644 index 000000000000..6947e7e10ff7 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py @@ -0,0 +1,180 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from ..common.sharedaccesssignature import ( + SharedAccessSignature, + _SharedAccessHelper, +) + +from ._constants import X_MS_VERSION + + +class BlobSharedAccessSignature(SharedAccessSignature): + ''' + Provides a factory for creating blob and container access + signature tokens with a common account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shared access signatures. + ''' + super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) + + def generate_blob(self, container_name, blob_name, permission=None, + expiry=None, start=None, id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None): + ''' + Generates a shared access signature for the blob. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param str blob_name: + Name of blob. + :param BlobPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. 
This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. 
+ :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + resource_path = container_name + '/' + blob_name + + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(id) + sas.add_resource('b') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path) + + return sas.get_token() + + def generate_container(self, container_name, permission=None, expiry=None, + start=None, id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None): + ''' + Generates a shared access signature for the container. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param ContainerPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered read, write, delete, list. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. 
If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. 
+ ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(id) + sas.add_resource('c') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name) + + return sas.get_token() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py new file mode 100644 index 000000000000..797c97069ee1 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py @@ -0,0 +1,38 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from ._constants import ( + __author__, + __version__, + DEFAULT_X_MS_VERSION, +) +from .cloudstorageaccount import CloudStorageAccount +from .models import ( + RetentionPolicy, + Logging, + Metrics, + CorsRule, + DeleteRetentionPolicy, + StaticWebsite, + ServiceProperties, + AccessPolicy, + ResourceTypes, + Services, + AccountPermissions, + Protocol, + ServiceStats, + GeoReplication, + LocationMode, + RetryContext, +) +from .retry import ( + ExponentialRetry, + LinearRetry, + no_retry, +) +from .sharedaccesssignature import ( + SharedAccessSignature, +) +from .tokencredential import TokenCredential diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py new file mode 100644 index 000000000000..15c15b9ea560 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py @@ -0,0 +1,117 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from ._common_conversion import ( + _sign_string, +) +from ._constants import ( + DEV_ACCOUNT_NAME, + DEV_ACCOUNT_SECONDARY_NAME +) + +import logging +logger = logging.getLogger(__name__) + + +class _StorageSharedKeyAuthentication(object): + def __init__(self, account_name, account_key, is_emulated=False): + self.account_name = account_name + self.account_key = account_key + self.is_emulated = is_emulated + + def _get_headers(self, request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + def _get_verb(self, request): + return request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = request.path.split('?')[0] + + # for emulator, use the DEV_ACCOUNT_NAME instead of DEV_ACCOUNT_SECONDARY_NAME + # as this is how the emulator works + if self.is_emulated and uri_path.find(DEV_ACCOUNT_SECONDARY_NAME) == 1: + # only replace the first instance + uri_path = uri_path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1) + + return '/' + self.account_name + uri_path + + def _get_canonicalized_headers(self, request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers.sort() + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + signature = _sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.headers['Authorization'] = auth_string + + +class 
_StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication): + def sign_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + logger.debug("String_to_sign=%s", string_to_sign) + + def _get_canonicalized_resource_query(self, request): + sorted_queries = [(name, value) for name, value in request.query.items()] + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + value + + return string_to_sign + + +class _StorageNoAuthentication(object): + def sign_request(self, request): + pass + + +class _StorageSASAuthentication(object): + def __init__(self, sas_token): + # ignore ?-prefix (added by tools such as Azure Portal) on sas tokens + # doing so avoids double question marks when signing + if sas_token[0] == '?': + self.sas_token = sas_token[1:] + else: + self.sas_token = sas_token + + def sign_request(self, request): + # if 'sig=' is present, then the request has already been signed + # as is the case when performing retries + if 'sig=' in request.path: + return + if '?' in request.path: + request.path += '&' + else: + request.path += '?' 
+ + request.path += self.sas_token diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_common_conversion.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_common_conversion.py new file mode 100644 index 000000000000..8b50afbe1afb --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_common_conversion.py @@ -0,0 +1,126 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac +import sys +from io import (SEEK_SET) + +from dateutil.tz import tzutc + +from ._error import ( + _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, + _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM, +) +from .models import ( + _unicode_type, +) + +if sys.version_info < (3,): + def _str(value): + if isinstance(value, unicode): + return value.encode('utf-8') + + return str(value) +else: + _str = str + + +def _to_str(value): + return _str(value) if value is not None else None + + +def _int_to_str(value): + return str(int(value)) if value is not None else None + + +def _bool_to_str(value): + if value is None: + return None + + if isinstance(value, bool): + if value: + return 'true' + else: + return 'false' + + return str(value) + + +def _to_utc_datetime(value): + return value.strftime('%Y-%m-%dT%H:%M:%SZ') + + +def _datetime_to_utc_string(value): + # Azure expects the date value passed in to be UTC. + # Azure will always return values as UTC. + # If a date is passed in without timezone info, it is assumed to be UTC. 
+ if value is None: + return None + + if value.tzinfo: + value = value.astimezone(tzutc()) + + return value.strftime('%a, %d %b %Y %H:%M:%S GMT') + + +def _encode_base64(data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def _decode_base64_to_bytes(data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def _decode_base64_to_text(data): + decoded_bytes = _decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def _sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = _decode_base64_to_bytes(key) + else: + if isinstance(key, _unicode_type): + key = key.encode('utf-8') + if isinstance(string_to_sign, _unicode_type): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = _encode_base64(digest) + return encoded_digest + + +def _get_content_md5(data): + md5 = hashlib.md5() + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM.format('data')) + else: + raise ValueError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format('data')) + + return base64.b64encode(md5.digest()).decode('utf-8') + + +def _lower(text): + return text.lower() diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py new file mode 100644 index 000000000000..1388fddeb625 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py @@ 
import sys

if sys.version_info >= (3,):
    from urllib.parse import urlparse
else:
    from urlparse import urlparse

# NOTE(review): SERVICE_HOST_BASE, DEFAULT_PROTOCOL, the DEV_* emulator
# constants (._constants) and _ERROR_STORAGE_MISSING_INFO (._error) are
# supplied by this module's top-of-file imports.

# Per-service emulator hosts ('file' has no emulator support).
_EMULATOR_ENDPOINTS = {
    'blob': DEV_BLOB_HOST,
    'queue': DEV_QUEUE_HOST,
    'file': '',
}

# Connection-string keys for custom primary/secondary endpoints.
_CONNECTION_ENDPOINTS = {
    'blob': 'BlobEndpoint',
    'queue': 'QueueEndpoint',
    'file': 'FileEndpoint',
}

_CONNECTION_ENDPOINTS_SECONDARY = {
    'blob': 'BlobSecondaryEndpoint',
    'queue': 'QueueSecondaryEndpoint',
    'file': 'FileSecondaryEndpoint',
}


class _ServiceParameters(object):
    """Resolved connection settings (credentials, protocol, endpoints) for one
    storage service, built from explicit arguments, the emulator defaults, or
    a connection string."""

    def __init__(self, service, account_name=None, account_key=None, sas_token=None, token_credential=None,
                 is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
                 custom_domain=None, custom_domain_secondary=None):

        self.account_name = account_name
        self.account_key = account_key
        self.sas_token = sas_token
        self.token_credential = token_credential
        self.protocol = protocol or DEFAULT_PROTOCOL
        self.is_emulated = is_emulated

        if is_emulated:
            self.account_name = DEV_ACCOUNT_NAME
            self.protocol = 'http'

            # Only set the account key if a sas_token is not present to allow
            # sas to be used with the emulator.
            self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None

            self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_NAME)
            self.secondary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_SECONDARY_NAME)
        else:
            # Strip whitespace from the key (a common copy/paste artifact).
            if self.account_key:
                self.account_key = self.account_key.strip()

            endpoint_suffix = endpoint_suffix or SERVICE_HOST_BASE

            # Set up the primary endpoint.
            if custom_domain:
                parsed_url = urlparse(custom_domain)

                # Trim any trailing slashes from the path.
                path = parsed_url.path.rstrip('/')

                self.primary_endpoint = parsed_url.netloc + path
                # FIX: was `parsed_url.scheme is ''` — identity comparison on a
                # str literal is implementation-dependent (and a SyntaxWarning
                # on CPython >= 3.8); use equality.
                self.protocol = self.protocol if parsed_url.scheme == '' else parsed_url.scheme
            else:
                if not self.account_name:
                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)
                self.primary_endpoint = '{}.{}.{}'.format(self.account_name, service, endpoint_suffix)

            # Set up the secondary endpoint.
            if custom_domain_secondary:
                # A secondary custom domain makes no sense without a primary one.
                if not custom_domain:
                    raise ValueError(_ERROR_STORAGE_MISSING_INFO)

                parsed_url = urlparse(custom_domain_secondary)

                # Trim any trailing slashes from the path.
                path = parsed_url.path.rstrip('/')

                self.secondary_endpoint = parsed_url.netloc + path
            else:
                if self.account_name:
                    self.secondary_endpoint = '{}-secondary.{}.{}'.format(self.account_name, service, endpoint_suffix)
                else:
                    self.secondary_endpoint = None

    @staticmethod
    def get_service_parameters(service, account_name=None, account_key=None, sas_token=None, token_credential=None,
                               is_emulated=None, protocol=None, endpoint_suffix=None, custom_domain=None,
                               request_session=None, connection_string=None, socket_timeout=None):
        """Build _ServiceParameters from a connection string, the emulator
        flag, or explicit account settings (checked in that priority order).

        :raises ValueError: if no account information was supplied, or a token
            credential is combined with a non-HTTPS protocol.
        """
        if connection_string:
            params = _ServiceParameters._from_connection_string(connection_string, service)
        elif is_emulated:
            params = _ServiceParameters(service, is_emulated=True)
        elif account_name:
            # FIX: guard against `protocol` being None before calling .lower()
            # (previously this raised AttributeError whenever a protocol was
            # not supplied); None falls back to DEFAULT_PROTOCOL ('https').
            if token_credential is not None and (protocol or DEFAULT_PROTOCOL).lower() != 'https':
                raise ValueError("Token credential is only supported with HTTPS.")
            params = _ServiceParameters(service,
                                        account_name=account_name,
                                        account_key=account_key,
                                        sas_token=sas_token,
                                        token_credential=token_credential,
                                        is_emulated=is_emulated,
                                        protocol=protocol,
                                        endpoint_suffix=endpoint_suffix,
                                        custom_domain=custom_domain)
        else:
            raise ValueError(_ERROR_STORAGE_MISSING_INFO)

        params.request_session = request_session
        params.socket_timeout = socket_timeout
        return params

    @staticmethod
    def _from_connection_string(connection_string, service):
        """Parse an Azure Storage connection string into _ServiceParameters."""
        # Split into key=value pairs removing empties, then split the pairs
        # into a dict (values may themselves contain '=', hence maxsplit=1).
        config = dict(s.split('=', 1) for s in connection_string.split(';') if s)

        # Authentication
        account_name = config.get('AccountName')
        account_key = config.get('AccountKey')
        sas_token = config.get('SharedAccessSignature')

        # Emulator
        is_emulated = config.get('UseDevelopmentStorage')

        # Basic URL Configuration
        protocol = config.get('DefaultEndpointsProtocol')
        endpoint_suffix = config.get('EndpointSuffix')

        # Custom URLs
        endpoint = config.get(_CONNECTION_ENDPOINTS[service])
        endpoint_secondary = config.get(_CONNECTION_ENDPOINTS_SECONDARY[service])

        return _ServiceParameters(service,
                                  account_name=account_name,
                                  account_key=account_key,
                                  sas_token=sas_token,
                                  is_emulated=is_emulated,
                                  protocol=protocol,
                                  endpoint_suffix=endpoint_suffix,
                                  custom_domain=endpoint,
                                  custom_domain_secondary=endpoint_secondary)
' +__version__ = '1.3.0' + +# UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)' +# First version(0.37.0) is the common package, and the second version(0.38.0) is the service package +USER_AGENT_STRING_PREFIX = 'Azure-Storage/{}-'.format(__version__) +USER_AGENT_STRING_SUFFIX = '(Python {} {}; {} {})'.format(platform.python_implementation(), + platform.python_version(), platform.system(), + platform.release()) + +# default values for common package, in case it is used directly +DEFAULT_X_MS_VERSION = '2018-03-28' +DEFAULT_USER_AGENT_STRING = '{}None {}'.format(USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX) + +# Live ServiceClient URLs +SERVICE_HOST_BASE = 'core.windows.net' +DEFAULT_PROTOCOL = 'https' + +# Development ServiceClient URLs +DEV_BLOB_HOST = '127.0.0.1:10000' +DEV_QUEUE_HOST = '127.0.0.1:10001' + +# Default credentials for Development Storage Service +DEV_ACCOUNT_NAME = 'devstoreaccount1' +DEV_ACCOUNT_SECONDARY_NAME = 'devstoreaccount1-secondary' +DEV_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==' + +# Socket timeout in seconds +DEFAULT_SOCKET_TIMEOUT = 20 + +# for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) +# The socket timeout is now the maximum total duration to send all data. 
+if sys.version_info >= (3, 5): + # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds + # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + DEFAULT_SOCKET_TIMEOUT = (20, 2000) + +# Encryption constants +_ENCRYPTION_PROTOCOL_V1 = '1.0' diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_deserialization.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_deserialization.py new file mode 100644 index 000000000000..80803da3e438 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_deserialization.py @@ -0,0 +1,384 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from dateutil import parser + +from ._common_conversion import _to_str + +try: + from xml.etree import cElementTree as ETree +except ImportError: + from xml.etree import ElementTree as ETree + +from .models import ( + ServiceProperties, + Logging, + Metrics, + CorsRule, + AccessPolicy, + _dict, + GeoReplication, + ServiceStats, + DeleteRetentionPolicy, + StaticWebsite, +) + + +def _to_int(value): + return value if value is None else int(value) + + +def _bool(value): + return value.lower() == 'true' + + +def _to_upper_str(value): + return _to_str(value).upper() if value is not None else None + + +def _get_download_size(start_range, end_range, resource_size): + if start_range is not None: + end_range = end_range if end_range else (resource_size if resource_size else None) + if end_range is not None: + return end_range - start_range + else: + return None + else: + return resource_size + + +GET_PROPERTIES_ATTRIBUTE_MAP = { + 'last-modified': (None, 
# NOTE(review): body of the vendored _deserialization module; `parser`
# (dateutil), ETree, the model classes (ServiceProperties, AccessPolicy,
# GeoReplication, ServiceStats, _dict, ...) and the _to_str/_to_int/_bool
# helpers come from the module's top-of-file imports and earlier definitions.

# Maps a response header name to (sub-object name or None, attribute name,
# converter). _parse_properties walks this table: a None first element sets
# the attribute directly on the result object, otherwise on the named nested
# object ('content_settings', 'lease', 'copy').
GET_PROPERTIES_ATTRIBUTE_MAP = {
    'last-modified': (None, 'last_modified', parser.parse),
    'etag': (None, 'etag', _to_str),
    'x-ms-blob-type': (None, 'blob_type', _to_str),
    'content-length': (None, 'content_length', _to_int),
    'content-range': (None, 'content_range', _to_str),
    'x-ms-blob-sequence-number': (None, 'page_blob_sequence_number', _to_int),
    'x-ms-blob-committed-block-count': (None, 'append_blob_committed_block_count', _to_int),
    'x-ms-blob-public-access': (None, 'public_access', _to_str),
    'x-ms-access-tier': (None, 'blob_tier', _to_str),
    'x-ms-access-tier-change-time': (None, 'blob_tier_change_time', parser.parse),
    'x-ms-access-tier-inferred': (None, 'blob_tier_inferred', _bool),
    'x-ms-archive-status': (None, 'rehydration_status', _to_str),
    'x-ms-share-quota': (None, 'quota', _to_int),
    'x-ms-server-encrypted': (None, 'server_encrypted', _bool),
    'x-ms-creation-time': (None, 'creation_time', parser.parse),
    'content-type': ('content_settings', 'content_type', _to_str),
    'cache-control': ('content_settings', 'cache_control', _to_str),
    'content-encoding': ('content_settings', 'content_encoding', _to_str),
    'content-disposition': ('content_settings', 'content_disposition', _to_str),
    'content-language': ('content_settings', 'content_language', _to_str),
    'content-md5': ('content_settings', 'content_md5', _to_str),
    'x-ms-lease-status': ('lease', 'status', _to_str),
    'x-ms-lease-state': ('lease', 'state', _to_str),
    'x-ms-lease-duration': ('lease', 'duration', _to_str),
    'x-ms-copy-id': ('copy', 'id', _to_str),
    'x-ms-copy-source': ('copy', 'source', _to_str),
    'x-ms-copy-status': ('copy', 'status', _to_str),
    'x-ms-copy-progress': ('copy', 'progress', _to_str),
    'x-ms-copy-completion-time': ('copy', 'completion_time', parser.parse),
    'x-ms-copy-destination-snapshot': ('copy', 'destination_snapshot_time', _to_str),
    'x-ms-copy-status-description': ('copy', 'status_description', _to_str),
    'x-ms-has-immutability-policy': (None, 'has_immutability_policy', _bool),
    'x-ms-has-legal-hold': (None, 'has_legal_hold', _bool),
}


def _parse_metadata(response):
    '''
    Extracts out resource metadata information
    (headers prefixed 'x-ms-meta-', with the prefix stripped).
    '''

    if response is None or response.headers is None:
        return None

    metadata = _dict()
    for key, value in response.headers.items():
        if key.lower().startswith('x-ms-meta-'):
            # len('x-ms-meta-') == 10 — keep only the user-supplied suffix.
            metadata[key[10:]] = _to_str(value)

    return metadata


def _parse_properties(response, result_class):
    '''
    Extracts out resource properties and metadata information.
    Ignores the standard http headers (only keys present in
    GET_PROPERTIES_ATTRIBUTE_MAP are consumed).
    '''

    if response is None or response.headers is None:
        return None

    props = result_class()
    for key, value in response.headers.items():
        info = GET_PROPERTIES_ATTRIBUTE_MAP.get(key)
        if info:
            if info[0] is None:
                # Direct attribute on the result object.
                setattr(props, info[1], info[2](value))
            else:
                # Attribute on a nested settings object (content_settings/lease/copy).
                attr = getattr(props, info[0])
                setattr(attr, info[1], info[2](value))

    # Page-blob tiers are reported by the service in mixed case; normalize.
    if hasattr(props, 'blob_type') and props.blob_type == 'PageBlob' and hasattr(props, 'blob_tier') and props.blob_tier is not None:
        props.blob_tier = _to_upper_str(props.blob_tier)
    return props


def _parse_length_from_content_range(content_range):
    '''
    Parses the blob length from the content range header: bytes 1-3/65537
    '''
    if content_range is None:
        return None

    # First, split in space and take the second half: '1-3/65537'
    # Next, split on slash and take the second half: '65537'
    # Finally, convert to an int: 65537
    return int(content_range.split(' ', 1)[1].split('/', 1)[1])


def _convert_xml_to_signed_identifiers(response):
    '''
    Parses a <SignedIdentifiers> XML body into a dict mapping each
    <SignedIdentifier>/<Id> to an AccessPolicy built from its optional
    <AccessPolicy> element (<Start>, <Expiry>, <Permission>).
    '''
    if response is None or response.body is None:
        return None

    list_element = ETree.fromstring(response.body)
    signed_identifiers = _dict()

    for signed_identifier_element in list_element.findall('SignedIdentifier'):
        # Id element
        id = signed_identifier_element.find('Id').text

        # Access policy element
        access_policy = AccessPolicy()
        access_policy_element = signed_identifier_element.find('AccessPolicy')
        if access_policy_element is not None:
            start_element = access_policy_element.find('Start')
            if start_element is not None:
                access_policy.start = parser.parse(start_element.text)

            expiry_element = access_policy_element.find('Expiry')
            if expiry_element is not None:
                access_policy.expiry = parser.parse(expiry_element.text)

            access_policy.permission = access_policy_element.findtext('Permission')

        signed_identifiers[id] = access_policy

    return signed_identifiers


def _convert_xml_to_service_stats(response):
    '''
    Parses a <StorageServiceStats> XML body: reads
    <GeoReplication><Status> (live|bootstrap|unavailable) and
    <LastSyncTime> into a ServiceStats object.
    '''
    if response is None or response.body is None:
        return None

    service_stats_element = ETree.fromstring(response.body)

    geo_replication_element = service_stats_element.find('GeoReplication')

    geo_replication = GeoReplication()
    geo_replication.status = geo_replication_element.find('Status').text
    # LastSyncTime may be empty while replication is bootstrapping.
    last_sync_time = geo_replication_element.find('LastSyncTime').text
    geo_replication.last_sync_time = parser.parse(last_sync_time) if last_sync_time else None

    service_stats = ServiceStats()
    service_stats.geo_replication = geo_replication
    return service_stats


def _convert_xml_to_service_properties(response):
    '''
    Parses a <StorageServiceProperties> XML body into a ServiceProperties
    object. Handles the optional sections <Logging>, <HourMetrics>,
    <MinuteMetrics>, <Cors> (a list of <CorsRule>),
    <DefaultServiceVersion>, <DeleteRetentionPolicy> and <StaticWebsite>;
    absent sections stay unset.
    '''
    if response is None or response.body is None:
        return None

    service_properties_element = ETree.fromstring(response.body)
    service_properties = ServiceProperties()

    # Logging
    logging = service_properties_element.find('Logging')
    if logging is not None:
        service_properties.logging = Logging()
        service_properties.logging.version = logging.find('Version').text
        service_properties.logging.delete = _bool(logging.find('Delete').text)
        service_properties.logging.read = _bool(logging.find('Read').text)
        service_properties.logging.write = _bool(logging.find('Write').text)

        _convert_xml_to_retention_policy(logging.find('RetentionPolicy'),
                                         service_properties.logging.retention_policy)
    # HourMetrics
    hour_metrics_element = service_properties_element.find('HourMetrics')
    if hour_metrics_element is not None:
        service_properties.hour_metrics = Metrics()
        _convert_xml_to_metrics(hour_metrics_element, service_properties.hour_metrics)

    # MinuteMetrics
    minute_metrics_element = service_properties_element.find('MinuteMetrics')
    if minute_metrics_element is not None:
        service_properties.minute_metrics = Metrics()
        _convert_xml_to_metrics(minute_metrics_element, service_properties.minute_metrics)

    # CORS
    cors = service_properties_element.find('Cors')
    if cors is not None:
        service_properties.cors = list()
        for rule in cors.findall('CorsRule'):
            # Required elements; each is a comma-separated list / integer.
            allowed_origins = rule.find('AllowedOrigins').text.split(',')

            allowed_methods = rule.find('AllowedMethods').text.split(',')

            max_age_in_seconds = int(rule.find('MaxAgeInSeconds').text)

            cors_rule = CorsRule(allowed_origins, allowed_methods, max_age_in_seconds)

            # Optional header lists.
            exposed_headers = rule.find('ExposedHeaders').text
            if exposed_headers is not None:
                cors_rule.exposed_headers = exposed_headers.split(',')

            allowed_headers = rule.find('AllowedHeaders').text
            if allowed_headers is not None:
                cors_rule.allowed_headers = allowed_headers.split(',')

            service_properties.cors.append(cors_rule)

    # Target version
    target_version = service_properties_element.find('DefaultServiceVersion')
    if target_version is not None:
        service_properties.target_version = target_version.text

    # DeleteRetentionPolicy
    delete_retention_policy_element = service_properties_element.find('DeleteRetentionPolicy')
    if delete_retention_policy_element is not None:
        service_properties.delete_retention_policy = DeleteRetentionPolicy()
        policy_enabled = _bool(delete_retention_policy_element.find('Enabled').text)
        service_properties.delete_retention_policy.enabled = policy_enabled

        # <Days> is only present when the policy is enabled.
        if policy_enabled:
            service_properties.delete_retention_policy.days = int(delete_retention_policy_element.find('Days').text)

    # StaticWebsite
    static_website_element = service_properties_element.find('StaticWebsite')
    if static_website_element is not None:
        service_properties.static_website = StaticWebsite()
        service_properties.static_website.enabled = _bool(static_website_element.find('Enabled').text)

        index_document_element = static_website_element.find('IndexDocument')
        if index_document_element is not None:
            service_properties.static_website.index_document = index_document_element.text

        error_document_element = static_website_element.find('ErrorDocument404Path')
        if error_document_element is not None:
            service_properties.static_website.error_document_404_path = error_document_element.text

    return service_properties


def _convert_xml_to_metrics(xml, metrics):
    '''
    Fills *metrics* in place from a metrics XML element:
    <Version>, <Enabled>, optional <IncludeAPIs>, and <RetentionPolicy>.
    '''
    # Version
    metrics.version = xml.find('Version').text

    # Enabled
    metrics.enabled = _bool(xml.find('Enabled').text)

    # IncludeAPIs
    include_apis_element = xml.find('IncludeAPIs')
    if include_apis_element is not None:
        metrics.include_apis = _bool(include_apis_element.text)

    # RetentionPolicy
    _convert_xml_to_retention_policy(xml.find('RetentionPolicy'), metrics.retention_policy)


def _convert_xml_to_retention_policy(xml, retention_policy):
    '''
    Fills *retention_policy* in place from a retention-policy XML element:
    <Enabled> (true|false) and optional <Days>.
    '''
    # Enabled
    retention_policy.enabled = _bool(xml.find('Enabled').text)

    # Days
    days_element = xml.find('Days')
    if days_element is not None:
        retention_policy.days = int(days_element.text)
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from collections import OrderedDict

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC

from ._common_conversion import (
    _encode_base64,
    _decode_base64_to_bytes,
)
from ._constants import (
    _ENCRYPTION_PROTOCOL_V1,
    __version__,
)
from ._error import (
    _ERROR_UNSUPPORTED_ENCRYPTION_VERSION,
    _validate_not_none,
    _validate_encryption_protocol_version,
    _validate_key_encryption_key_unwrap,
    _validate_kek_id,
)


class _EncryptionAlgorithm(object):
    '''
    Specifies which client encryption algorithm is used.
    '''
    AES_CBC_256 = 'AES_CBC_256'


class _WrappedContentKey:
    '''
    Represents the envelope key details stored on the service:
    the content-encryption key, encrypted (wrapped) by the
    key-encryption key identified by key_id.
    '''

    def __init__(self, algorithm, encrypted_key, key_id):
        '''
        :param str algorithm:
            The algorithm used for wrapping.
        :param bytes encrypted_key:
            The encrypted content-encryption-key.
        :param str key_id:
            The key-encryption-key identifier string.
        '''

        _validate_not_none('algorithm', algorithm)
        _validate_not_none('encrypted_key', encrypted_key)
        _validate_not_none('key_id', key_id)

        self.algorithm = algorithm
        self.encrypted_key = encrypted_key
        self.key_id = key_id


class _EncryptionAgent:
    '''
    Represents the encryption agent stored on the service.
    It consists of the encryption protocol version and encryption algorithm used.
    '''

    def __init__(self, encryption_algorithm, protocol):
        '''
        :param _EncryptionAlgorithm encryption_algorithm:
            The algorithm used for encrypting the message contents.
        :param str protocol:
            The protocol version used for encryption.
        '''

        _validate_not_none('encryption_algorithm', encryption_algorithm)
        _validate_not_none('protocol', protocol)

        self.encryption_algorithm = str(encryption_algorithm)
        self.protocol = protocol


class _EncryptionData:
    '''
    Represents the encryption data that is stored on the service.
    '''

    def __init__(self, content_encryption_IV, encryption_agent, wrapped_content_key,
                 key_wrapping_metadata):
        '''
        :param bytes content_encryption_IV:
            The content encryption initialization vector.
        :param _EncryptionAgent encryption_agent:
            The encryption agent.
        :param _WrappedContentKey wrapped_content_key:
            An object that stores the wrapping algorithm, the key identifier,
            and the encrypted key bytes.
        :param dict key_wrapping_metadata:
            A dict containing metadata related to the key wrapping.
        '''

        _validate_not_none('content_encryption_IV', content_encryption_IV)
        _validate_not_none('encryption_agent', encryption_agent)
        _validate_not_none('wrapped_content_key', wrapped_content_key)

        self.content_encryption_IV = content_encryption_IV
        self.encryption_agent = encryption_agent
        self.wrapped_content_key = wrapped_content_key
        self.key_wrapping_metadata = key_wrapping_metadata


def _generate_encryption_data_dict(kek, cek, iv):
    '''
    Generates and returns the encryption metadata as a dict.

    :param object kek: The key encryption key. See calling functions for more information.
    :param bytes cek: The content encryption key.
    :param bytes iv: The initialization vector.
    :return: A dict containing all the encryption metadata.
    :rtype: dict
    '''
    # Encrypt the cek.
    wrapped_cek = kek.wrap_key(cek)

    # Build the encryption_data dict.
    # Use OrderedDict to comply with Java's ordering requirement.
    wrapped_content_key = OrderedDict()
    wrapped_content_key['KeyId'] = kek.get_kid()
    wrapped_content_key['EncryptedKey'] = _encode_base64(wrapped_cek)
    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()

    encryption_agent = OrderedDict()
    encryption_agent['Protocol'] = _ENCRYPTION_PROTOCOL_V1
    encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256

    encryption_data_dict = OrderedDict()
    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
    encryption_data_dict['EncryptionAgent'] = encryption_agent
    encryption_data_dict['ContentEncryptionIV'] = _encode_base64(iv)
    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + __version__}

    return encryption_data_dict


def _dict_to_encryption_data(encryption_data_dict):
    '''
    Converts the specified dictionary to an EncryptionData object for
    eventual use in decryption.

    :param dict encryption_data_dict:
        The dictionary containing the encryption data.
    :return: an _EncryptionData object built from the dictionary.
    :rtype: _EncryptionData
    :raises ValueError: if the protocol version is missing or unsupported.
    '''
    try:
        if encryption_data_dict['EncryptionAgent']['Protocol'] != _ENCRYPTION_PROTOCOL_V1:
            raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
    except KeyError:
        raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION)
    wrapped_content_key = encryption_data_dict['WrappedContentKey']
    wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'],
                                             _decode_base64_to_bytes(wrapped_content_key['EncryptedKey']),
                                             wrapped_content_key['KeyId'])

    encryption_agent = encryption_data_dict['EncryptionAgent']
    encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'],
                                        encryption_agent['Protocol'])

    # KeyWrappingMetadata is optional in the stored payload.
    if 'KeyWrappingMetadata' in encryption_data_dict:
        key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata']
    else:
        key_wrapping_metadata = None

    encryption_data = _EncryptionData(_decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']),
                                      encryption_agent,
                                      wrapped_content_key,
                                      key_wrapping_metadata)

    return encryption_data


def _generate_AES_CBC_cipher(cek, iv):
    '''
    Generates and returns an encryption cipher for AES CBC using the given cek and iv.

    NOTE(review): v1 protocol uses AES-256-CBC with no message authentication
    at this layer; integrity relies on the enclosing transport/metadata —
    confirm before reusing outside this vendored client.

    :param bytes[] cek: The content encryption key for the cipher.
    :param bytes[] iv: The initialization vector for the cipher.
    :return: A cipher for encrypting in AES256 CBC.
    :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
    '''

    backend = default_backend()
    algorithm = AES(cek)
    mode = CBC(iv)
    return Cipher(algorithm, mode, backend)


def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None):
    '''
    Extracts and returns the content_encryption_key stored in the encryption_data object
    and performs necessary validation on all parameters.
    :param _EncryptionData encryption_data:
        The encryption metadata of the retrieved value.
    :param obj key_encryption_key:
        The key_encryption_key used to unwrap the cek. Please refer to high-level service object
        instance variables for more details.
    :param func key_resolver:
        A function used that, given a key_id, will return a key_encryption_key. Please refer
        to high-level service object instance variables for more details.
    :return: the content_encryption_key stored in the encryption_data object.
    :rtype: bytes[]
    '''

    _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)

    _validate_encryption_protocol_version(encryption_data.encryption_agent.protocol)

    content_encryption_key = None

    # If the resolver exists, give priority to the key it finds.
    if key_resolver is not None:
        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)

    _validate_not_none('key_encryption_key', key_encryption_key)
    _validate_key_encryption_key_unwrap(key_encryption_key)
    # The resolved KEK must match the key id recorded at encryption time.
    _validate_kek_id(encryption_data.wrapped_content_key.key_id, key_encryption_key.get_kid())

    # Will throw an exception if the specified algorithm is not supported.
    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
                                                           encryption_data.wrapped_content_key.algorithm)
    _validate_not_none('content_encryption_key', content_encryption_key)

    return content_encryption_key
+# -------------------------------------------------------------------------- +from sys import version_info + +if version_info < (3,): + def _str(value): + if isinstance(value, unicode): + return value.encode('utf-8') + + return str(value) +else: + _str = str + + +def _to_str(value): + return _str(value) if value is not None else None + + +from azure.common import ( + AzureHttpError, + AzureConflictHttpError, + AzureMissingResourceHttpError, + AzureException, +) +from ._constants import ( + _ENCRYPTION_PROTOCOL_V1, +) + +_ERROR_CONFLICT = 'Conflict ({0})' +_ERROR_NOT_FOUND = 'Not found ({0})' +_ERROR_UNKNOWN = 'Unknown error ({0})' +_ERROR_STORAGE_MISSING_INFO = \ + 'You need to provide an account name and either an account_key or sas_token when creating a storage service.' +_ERROR_EMULATOR_DOES_NOT_SUPPORT_FILES = \ + 'The emulator does not support the file service.' +_ERROR_ACCESS_POLICY = \ + 'share_access_policy must be either SignedIdentifier or AccessPolicy ' + \ + 'instance' +_ERROR_PARALLEL_NOT_SEEKABLE = 'Parallel operations require a seekable stream.' +_ERROR_VALUE_SHOULD_BE_BYTES = '{0} should be of type bytes.' +_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM = '{0} should be of type bytes or a readable file-like/io.IOBase stream object.' +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' +_ERROR_VALUE_SHOULD_BE_STREAM = '{0} should be a file-like/io.IOBase type stream object with a read method.' +_ERROR_VALUE_NONE = '{0} should not be None.' +_ERROR_VALUE_NONE_OR_EMPTY = '{0} should not be None or empty.' +_ERROR_VALUE_NEGATIVE = '{0} should not be negative.' +_ERROR_START_END_NEEDED_FOR_MD5 = \ + 'Both end_range and start_range need to be specified ' + \ + 'for getting content MD5.' +_ERROR_RANGE_TOO_LARGE_FOR_MD5 = \ + 'Getting content MD5 for a range greater than 4MB ' + \ + 'is not supported.' +_ERROR_MD5_MISMATCH = \ + 'MD5 mismatch. Expected value is \'{0}\', computed value is \'{1}\'.' 
+_ERROR_TOO_MANY_ACCESS_POLICIES = \
+    'Too many access policies provided. The server does not support setting more than 5 access policies on a single resource.'
+_ERROR_OBJECT_INVALID = \
+    '{0} does not define a complete interface. Value of {1} is either missing or invalid.'
+_ERROR_UNSUPPORTED_ENCRYPTION_VERSION = \
+    'Encryption version is not supported.'
+_ERROR_DECRYPTION_FAILURE = \
+    'Decryption failed'
+_ERROR_ENCRYPTION_REQUIRED = \
+    'Encryption required but no key was provided.'
+_ERROR_DECRYPTION_REQUIRED = \
+    'Decryption required but neither key nor resolver was provided.' + \
+    ' If you do not want to decrypt, please do not set the require encryption flag.'
+_ERROR_INVALID_KID = \
+    'Provided or resolved key-encryption-key does not match the id of key used to encrypt.'
+_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM = \
+    'Specified encryption algorithm is not supported.'
+_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = 'The require_encryption flag is set, but encryption is not supported' + \
+                                           ' for this method.'
+_ERROR_UNKNOWN_KEY_WRAP_ALGORITHM = 'Unknown key wrap algorithm.'
+_ERROR_DATA_NOT_ENCRYPTED = 'Encryption required, but received data does not contain appropriate metadata. ' + \
+                            'Data was either not encrypted or metadata has been lost.'
+
+
+def _dont_fail_on_exist(error):
+    ''' don't throw exception if the resource exists.
+    This is called by create_* APIs with fail_on_exist=False'''
+    if isinstance(error, AzureConflictHttpError):
+        return False
+    else:
+        raise error
+
+
+def _dont_fail_not_exist(error):
+    ''' don't throw exception if the resource doesn't exist.
+    This is called by delete_* APIs with fail_not_exist=False'''
+    if isinstance(error, AzureMissingResourceHttpError):
+        return False
+    else:
+        raise error
+
+
+def _http_error_handler(http_error):
+    ''' Simple error handler for azure.'''
+    message = str(http_error)
+    error_code = None
+
+    if 'x-ms-error-code' in http_error.respheader:
+        error_code = http_error.respheader['x-ms-error-code']
+        message += ' ErrorCode: ' + error_code
+
+    if http_error.respbody is not None:
+        message += '\n' + http_error.respbody.decode('utf-8-sig')
+
+    ex = AzureHttpError(message, http_error.status)
+    ex.error_code = error_code
+
+    raise ex
+
+
+def _validate_type_bytes(param_name, param):
+    if not isinstance(param, bytes):
+        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
+
+
+def _validate_type_bytes_or_stream(param_name, param):
+    if not (isinstance(param, bytes) or hasattr(param, 'read')):
+        raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name))
+
+
+def _validate_not_none(param_name, param):
+    if param is None:
+        raise ValueError(_ERROR_VALUE_NONE.format(param_name))
+
+
+def _validate_content_match(server_md5, computed_md5):
+    if server_md5 != computed_md5:
+        raise AzureException(_ERROR_MD5_MISMATCH.format(server_md5, computed_md5))
+
+
+def _validate_access_policies(identifiers):
+    if identifiers and len(identifiers) > 5:
+        raise AzureException(_ERROR_TOO_MANY_ACCESS_POLICIES)
+
+
+def _validate_key_encryption_key_wrap(kek):
+    # Note that None is not callable and so will fail the second clause of each check.
+ if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +def _validate_key_encryption_key_unwrap(kek): + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'unwrap_key') or not callable(kek.unwrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key')) + + +def _validate_encryption_required(require_encryption, kek): + if require_encryption and (kek is None): + raise ValueError(_ERROR_ENCRYPTION_REQUIRED) + + +def _validate_decryption_required(require_encryption, kek, resolver): + if (require_encryption and (kek is None) and + (resolver is None)): + raise ValueError(_ERROR_DECRYPTION_REQUIRED) + + +def _validate_encryption_protocol_version(encryption_protocol): + if not (_ENCRYPTION_PROTOCOL_V1 == encryption_protocol): + raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_VERSION) + + +def _validate_kek_id(kid, resolved_id): + if not (kid == resolved_id): + raise ValueError(_ERROR_INVALID_KID) + + +def _validate_encryption_unsupported(require_encryption, key_encryption_key): + if require_encryption or (key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/__init__.py new file mode 100644 index 000000000000..2990ec80abe0 --- /dev/null +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/__init__.py @@ -0,0 +1,74 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + + +class HTTPError(Exception): + ''' + Represents an HTTP Exception when response status code >= 300. + + :ivar int status: + the status code of the response + :ivar str message: + the message + :ivar list headers: + the returned headers, as a list of (name, value) pairs + :ivar bytes body: + the body of the response + ''' + + def __init__(self, status, message, respheader, respbody): + self.status = status + self.respheader = respheader + self.respbody = respbody + Exception.__init__(self, message) + + +class HTTPResponse(object): + ''' + Represents a response from an HTTP request. + + :ivar int status: + the status code of the response + :ivar str message: + the message + :ivar dict headers: + the returned headers + :ivar bytes body: + the body of the response + ''' + + def __init__(self, status, message, headers, body): + self.status = status + self.message = message + self.headers = headers + self.body = body + + +class HTTPRequest(object): + ''' + Represents an HTTP Request. + + :ivar str host: + the host name to connect to + :ivar str method: + the method to use to connect (string such as GET, POST, PUT, etc.) + :ivar str path: + the uri fragment + :ivar dict query: + query parameters + :ivar dict headers: + header values + :ivar bytes body: + the body of the request. 
+ ''' + + def __init__(self): + self.host = '' + self.method = '' + self.path = '' + self.query = {} # list of (name, value) + self.headers = {} # list of (header name, header value) + self.body = '' diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/httpclient.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/httpclient.py new file mode 100644 index 000000000000..b5847660e296 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_http/httpclient.py @@ -0,0 +1,107 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +from . import HTTPResponse +from .._serialization import _get_data_bytes_or_stream_only +logger = logging.getLogger(__name__) + + +class _HTTPClient(object): + ''' + Takes the request and sends it to cloud service and returns the response. + ''' + + def __init__(self, protocol=None, session=None, timeout=None): + ''' + :param str protocol: + http or https. + :param requests.Session session: + session object created with requests library (or compatible). + :param int timeout: + timeout for the http request, in seconds. + ''' + self.protocol = protocol + self.session = session + self.timeout = timeout + + # By default, requests adds an Accept:*/* and Accept-Encoding to the session, + # which causes issues with some Azure REST APIs. Removing these here gives us + # the flexibility to add it back on a case by case basis. 
+ if 'Accept' in self.session.headers: + del self.session.headers['Accept'] + + if 'Accept-Encoding' in self.session.headers: + del self.session.headers['Accept-Encoding'] + + self.proxies = None + + def set_proxy(self, host, port, user, password): + ''' + Sets the proxy server host and port for the HTTP CONNECT Tunnelling. + + Note that we set the proxies directly on the request later on rather than + using the session object as requests has a bug where session proxy is ignored + in favor of environment proxy. So, auth will not work unless it is passed + directly when making the request as this overrides both. + + :param str host: + Address of the proxy. Ex: '192.168.0.100' + :param int port: + Port of the proxy. Ex: 6000 + :param str user: + User for proxy authorization. + :param str password: + Password for proxy authorization. + ''' + if user and password: + proxy_string = '{}:{}@{}:{}'.format(user, password, host, port) + else: + proxy_string = '{}:{}'.format(host, port) + + self.proxies = {'http': 'http://{}'.format(proxy_string), + 'https': 'https://{}'.format(proxy_string)} + + def perform_request(self, request): + ''' + Sends an HTTPRequest to Azure Storage and returns an HTTPResponse. If + the response code indicates an error, raise an HTTPError. + + :param HTTPRequest request: + The request to serialize and send. + :return: An HTTPResponse containing the parsed HTTP response. 
+ :rtype: :class:`~azure.storage.common._http.HTTPResponse` + ''' + # Verify the body is in bytes or either a file-like/stream object + if request.body: + request.body = _get_data_bytes_or_stream_only('request.body', request.body) + + # Construct the URI + uri = self.protocol.lower() + '://' + request.host + request.path + + # Send the request + response = self.session.request(request.method, + uri, + params=request.query, + headers=request.headers, + data=request.body or None, + timeout=self.timeout, + proxies=self.proxies) + + # Parse the response + status = int(response.status_code) + response_headers = {} + for key, name in response.headers.items(): + # Preserve the case of metadata + if key.lower().startswith('x-ms-meta-'): + response_headers[key] = name + else: + response_headers[key.lower()] = name + + wrap = HTTPResponse(status, response.reason, response_headers, response.content) + response.close() + + return wrap diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_serialization.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_serialization.py new file mode 100644 index 000000000000..af27ce5b0089 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_serialization.py @@ -0,0 +1,371 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +import sys +import uuid +from datetime import date +from io import (BytesIO, IOBase, SEEK_SET, SEEK_END, UnsupportedOperation) +from os import fstat +from time import time +from wsgiref.handlers import format_date_time + +from dateutil.tz import tzutc + +if sys.version_info >= (3,): + from urllib.parse import quote as url_quote +else: + from urllib2 import quote as url_quote + +try: + from xml.etree import cElementTree as ETree +except ImportError: + from xml.etree import ElementTree as ETree + +from ._error import ( + _ERROR_VALUE_SHOULD_BE_BYTES, + _ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM, + _ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM +) +from .models import ( + _unicode_type, +) +from ._common_conversion import ( + _str, +) + + +def _to_utc_datetime(value): + # Azure expects the date value passed in to be UTC. + # Azure will always return values as UTC. + # If a date is passed in without timezone info, it is assumed to be UTC. + if value.tzinfo: + value = value.astimezone(tzutc()) + return value.strftime('%Y-%m-%dT%H:%M:%SZ') + + +def _update_request(request, x_ms_version, user_agent_string): + # Verify body + if request.body: + request.body = _get_data_bytes_or_stream_only('request.body', request.body) + length = _len_plus(request.body) + + # only scenario where this case is plausible is if the stream object is not seekable. + if length is None: + raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM) + + # if it is PUT, POST, MERGE, DELETE, need to add content-length to header. 
+ if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: + request.headers['Content-Length'] = str(length) + + # append addtional headers based on the service + request.headers['x-ms-version'] = x_ms_version + request.headers['User-Agent'] = user_agent_string + request.headers['x-ms-client-request-id'] = str(uuid.uuid1()) + + # If the host has a path component (ex local storage), move it + path = request.host.split('/', 1) + if len(path) == 2: + request.host = path[0] + request.path = '/{}{}'.format(path[1], request.path) + + # Encode and optionally add local storage prefix to path + request.path = url_quote(request.path, '/()$=\',~') + + +def _add_metadata_headers(metadata, request): + if metadata: + if not request.headers: + request.headers = {} + for name, value in metadata.items(): + request.headers['x-ms-meta-' + name] = value + + +def _add_date_header(request): + current_time = format_date_time(time()) + request.headers['x-ms-date'] = current_time + + +def _get_data_bytes_only(param_name, param_value): + '''Validates the request body passed in and converts it to bytes + if our policy allows it.''' + if param_value is None: + return b'' + + if isinstance(param_value, bytes): + return param_value + + raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) + + +def _get_data_bytes_or_stream_only(param_name, param_value): + '''Validates the request body passed in is a stream/file-like or bytes + object.''' + if param_value is None: + return b'' + + if isinstance(param_value, bytes) or hasattr(param_value, 'read'): + return param_value + + raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES_OR_STREAM.format(param_name)) + + +def _get_request_body(request_body): + '''Converts an object into a request body. If it's None + we'll return an empty string, if it's one of our objects it'll + convert it to XML and return it. 
Otherwise we just use the object + directly''' + if request_body is None: + return b'' + + if isinstance(request_body, bytes) or isinstance(request_body, IOBase): + return request_body + + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') + + request_body = str(request_body) + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') + + return request_body + + +def _convert_signed_identifiers_to_xml(signed_identifiers): + if signed_identifiers is None: + return '' + + sis = ETree.Element('SignedIdentifiers') + for id, access_policy in signed_identifiers.items(): + # Root signed identifers element + si = ETree.SubElement(sis, 'SignedIdentifier') + + # Id element + ETree.SubElement(si, 'Id').text = id + + # Access policy element + policy = ETree.SubElement(si, 'AccessPolicy') + + if access_policy.start: + start = access_policy.start + if isinstance(access_policy.start, date): + start = _to_utc_datetime(start) + ETree.SubElement(policy, 'Start').text = start + + if access_policy.expiry: + expiry = access_policy.expiry + if isinstance(access_policy.expiry, date): + expiry = _to_utc_datetime(expiry) + ETree.SubElement(policy, 'Expiry').text = expiry + + if access_policy.permission: + ETree.SubElement(policy, 'Permission').text = _str(access_policy.permission) + + # Add xml declaration and serialize + try: + stream = BytesIO() + ETree.ElementTree(sis).write(stream, xml_declaration=True, encoding='utf-8', method='xml') + except: + raise + finally: + output = stream.getvalue() + stream.close() + + return output + + +def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, + cors, target_version=None, delete_retention_policy=None, static_website=None): + ''' + + + + version-number + true|false + true|false + true|false + + true|false + number-of-days + + + + version-number + true|false + true|false + + true|false + number-of-days + + + + version-number + true|false + true|false + + true|false + 
number-of-days + + + + + comma-separated-list-of-allowed-origins + comma-separated-list-of-HTTP-verb + max-caching-age-in-seconds + comma-seperated-list-of-response-headers + comma-seperated-list-of-request-headers + + + + true|false + number-of-days + + + true|false + + + + + ''' + service_properties_element = ETree.Element('StorageServiceProperties') + + # Logging + if logging: + logging_element = ETree.SubElement(service_properties_element, 'Logging') + ETree.SubElement(logging_element, 'Version').text = logging.version + ETree.SubElement(logging_element, 'Delete').text = str(logging.delete) + ETree.SubElement(logging_element, 'Read').text = str(logging.read) + ETree.SubElement(logging_element, 'Write').text = str(logging.write) + + retention_element = ETree.SubElement(logging_element, 'RetentionPolicy') + _convert_retention_policy_to_xml(logging.retention_policy, retention_element) + + # HourMetrics + if hour_metrics: + hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics') + _convert_metrics_to_xml(hour_metrics, hour_metrics_element) + + # MinuteMetrics + if minute_metrics: + minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics') + _convert_metrics_to_xml(minute_metrics, minute_metrics_element) + + # CORS + # Make sure to still serialize empty list + if cors is not None: + cors_element = ETree.SubElement(service_properties_element, 'Cors') + for rule in cors: + cors_rule = ETree.SubElement(cors_element, 'CorsRule') + ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins) + ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods) + ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds) + ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers) + ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers) + + # Target version + if target_version: + 
ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version + + # DeleteRetentionPolicy + if delete_retention_policy: + policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy') + ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled) + + if delete_retention_policy.enabled: + ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days) + + # StaticWebsite + if static_website: + static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite') + ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled) + + if static_website.enabled: + + if static_website.index_document is not None: + ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document) + + if static_website.error_document_404_path is not None: + ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \ + str(static_website.error_document_404_path) + + # Add xml declaration and serialize + try: + stream = BytesIO() + ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True, encoding='utf-8', + method='xml') + except: + raise + finally: + output = stream.getvalue() + stream.close() + + return output + + +def _convert_metrics_to_xml(metrics, root): + ''' + version-number + true|false + true|false + + true|false + number-of-days + + ''' + # Version + ETree.SubElement(root, 'Version').text = metrics.version + + # Enabled + ETree.SubElement(root, 'Enabled').text = str(metrics.enabled) + + # IncludeAPIs + if metrics.enabled and metrics.include_apis is not None: + ETree.SubElement(root, 'IncludeAPIs').text = str(metrics.include_apis) + + # RetentionPolicy + retention_element = ETree.SubElement(root, 'RetentionPolicy') + _convert_retention_policy_to_xml(metrics.retention_policy, retention_element) + + +def _convert_retention_policy_to_xml(retention_policy, root): + ''' + 
true|false + number-of-days + ''' + # Enabled + ETree.SubElement(root, 'Enabled').text = str(retention_policy.enabled) + + # Days + if retention_policy.enabled and retention_policy.days: + ETree.SubElement(root, 'Days').text = str(retention_policy.days) + + +def _len_plus(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + return fstat(fileno).st_size + + # If the stream is seekable and tell() is implemented, calculate the stream size. + try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, UnsupportedOperation): + pass + + return length diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py new file mode 100644 index 000000000000..f3ac1aa7be70 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py @@ -0,0 +1,200 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +# Note that we import BlobService/QueueService/FileService on demand +# because this module is imported by azure/storage/__init__ +# ie. we don't want 'import azure.storage' to trigger an automatic import +# of blob/queue/file packages. 
+ +from ._error import _validate_not_none +from .models import ( + ResourceTypes, + Services, + AccountPermissions, +) +from .sharedaccesssignature import ( + SharedAccessSignature, +) + +''' +from azure.storage.common._error import _validate_not_none +from azure.storage.common.models import ( + ResourceTypes, + Services, + AccountPermissions, +) +from azure.storage.common.sharedaccesssignature import ( + SharedAccessSignature, +) +''' + + +class CloudStorageAccount(object): + """ + Provides a factory for creating the blob, queue, and file services + with a common account name and account key or sas token. Users can either + use the factory or can construct the appropriate service directly. + """ + + def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None): + ''' + :param str account_name: + The storage account name. This is used to authenticate requests + signed with an account key and to construct the storage endpoint. It + is required unless is_emulated is used. + :param str account_key: + The storage account key. This is used for shared key authentication. + :param str sas_token: + A shared access signature token to use to authenticate requests + instead of the account key. If account key and sas token are both + specified, account key will be used to sign. + :param bool is_emulated: + Whether to use the emulator. Defaults to False. If specified, will + override all other parameters. + ''' + self.account_name = account_name + self.account_key = account_key + self.sas_token = sas_token + self.is_emulated = is_emulated + + def create_block_blob_service(self): + ''' + Creates a BlockBlobService object with the settings specified in the + CloudStorageAccount. + + :return: A service object. 
+ :rtype: :class:`~azure.storage.blob.blockblobservice.BlockBlobService` + ''' + try: + from azure.storage.blob.blockblobservice import BlockBlobService + return BlockBlobService(self.account_name, self.account_key, + sas_token=self.sas_token, + is_emulated=self.is_emulated) + except ImportError: + raise Exception('The package azure-storage-blob is required. ' + + 'Please install it using "pip install azure-storage-blob"') + + def create_page_blob_service(self): + ''' + Creates a PageBlobService object with the settings specified in the + CloudStorageAccount. + + :return: A service object. + :rtype: :class:`~azure.storage.blob.pageblobservice.PageBlobService` + ''' + try: + from azure.storage.blob.pageblobservice import PageBlobService + return PageBlobService(self.account_name, self.account_key, + sas_token=self.sas_token, + is_emulated=self.is_emulated) + except ImportError: + raise Exception('The package azure-storage-blob is required. ' + + 'Please install it using "pip install azure-storage-blob"') + + def create_append_blob_service(self): + ''' + Creates a AppendBlobService object with the settings specified in the + CloudStorageAccount. + + :return: A service object. + :rtype: :class:`~azure.storage.blob.appendblobservice.AppendBlobService` + ''' + try: + from azure.storage.blob.appendblobservice import AppendBlobService + return AppendBlobService(self.account_name, self.account_key, + sas_token=self.sas_token, + is_emulated=self.is_emulated) + except ImportError: + raise Exception('The package azure-storage-blob is required. ' + + 'Please install it using "pip install azure-storage-blob"') + + def create_queue_service(self): + ''' + Creates a QueueService object with the settings specified in the + CloudStorageAccount. + + :return: A service object. 
+ :rtype: :class:`~azure.storage.queue.queueservice.QueueService` + ''' + try: + from azure.storage.queue.queueservice import QueueService + return QueueService(self.account_name, self.account_key, + sas_token=self.sas_token, + is_emulated=self.is_emulated) + except ImportError: + raise Exception('The package azure-storage-queue is required. ' + + 'Please install it using "pip install azure-storage-queue"') + + def create_file_service(self): + ''' + Creates a FileService object with the settings specified in the + CloudStorageAccount. + + :return: A service object. + :rtype: :class:`~azure.storage.file.fileservice.FileService` + ''' + try: + from azure.storage.file.fileservice import FileService + return FileService(self.account_name, self.account_key, + sas_token=self.sas_token) + except ImportError: + raise Exception('The package azure-storage-file is required. ' + + 'Please install it using "pip install azure-storage-file"') + + def generate_shared_access_signature(self, services, resource_types, + permission, expiry, start=None, + ip=None, protocol=None): + ''' + Generates a shared access signature for the account. + Use the returned signature with the sas_token parameter of the service + or to create a new account object. + + :param Services services: + Specifies the services accessible with the account SAS. You can + combine values to provide access to more than one service. + :param ResourceTypes resource_types: + Specifies the resource types that are accessible with the account + SAS. You can combine values to provide access to more than one + resource type. + :param AccountPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. 
You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. Possible values are + both HTTPS and HTTP (https,http) or HTTPS only (https). The default value + is https,http. Note that HTTP only is not a permitted value. 
+ ''' + _validate_not_none('self.account_name', self.account_name) + _validate_not_none('self.account_key', self.account_key) + + sas = SharedAccessSignature(self.account_name, self.account_key) + return sas.generate_account(services, resource_types, permission, + expiry, start=start, ip=ip, protocol=protocol) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/models.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/models.py new file mode 100644 index 000000000000..5ada54ce29dd --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/models.py @@ -0,0 +1,672 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import sys + +if sys.version_info < (3,): + from collections import Iterable + + _unicode_type = unicode +else: + from collections.abc import Iterable + + _unicode_type = str + +from ._error import ( + _validate_not_none +) + + +class _HeaderDict(dict): + def __getitem__(self, index): + return super(_HeaderDict, self).__getitem__(index.lower()) + + +class _list(list): + '''Used so that additional properties can be set on the return list''' + pass + + +class _dict(dict): + '''Used so that additional properties can be set on the return dictionary''' + pass + + +class _OperationContext(object): + ''' + Contains information that lasts the lifetime of an operation. This operation + may span multiple calls to the Azure service. + + :ivar bool location_lock: + Whether the location should be locked for this operation. + :ivar str location: + The location to lock to. 
+ ''' + + def __init__(self, location_lock=False): + self.location_lock = location_lock + self.host_location = None + + +class ListGenerator(Iterable): + ''' + A generator object used to list storage resources. The generator will lazily + follow the continuation tokens returned by the service and stop when all + resources have been returned or max_results is reached. + + If max_results is specified and the account has more than that number of + resources, the generator will have a populated next_marker field once it + finishes. This marker can be used to create a new generator if more + results are desired. + ''' + + def __init__(self, resources, list_method, list_args, list_kwargs): + self.items = resources + self.next_marker = resources.next_marker + + self._list_method = list_method + self._list_args = list_args + self._list_kwargs = list_kwargs + + def __iter__(self): + # return results + for i in self.items: + yield i + + while True: + # if no more results on the service, return + if not self.next_marker: + break + + # update the marker args + self._list_kwargs['marker'] = self.next_marker + + # handle max results, if present + max_results = self._list_kwargs.get('max_results') + if max_results is not None: + max_results = max_results - len(self.items) + + # if we've reached max_results, return + # else, update the max_results arg + if max_results <= 0: + break + else: + self._list_kwargs['max_results'] = max_results + + # get the next segment + resources = self._list_method(*self._list_args, **self._list_kwargs) + self.items = resources + self.next_marker = resources.next_marker + + # return results + for i in self.items: + yield i + + +class RetryContext(object): + ''' + Contains the request and response information that can be used to determine + whether and how to retry. This context is stored across retries and may be + used to store other information relevant to the retry strategy. 
+ + :ivar ~azure.storage.common._http.HTTPRequest request: + The request sent to the storage service. + :ivar ~azure.storage.common._http.HTTPResponse response: + The response returned by the storage service. + :ivar LocationMode location_mode: + The location the request was sent to. + :ivar Exception exception: + The exception that just occurred. The type could either be AzureException (for HTTP errors), + or other Exception types from lower layers, which are kept unwrapped for easier processing. + :ivar bool is_emulated: + Whether retry is targeting the emulator. The default value is False. + :ivar int body_position: + The initial position of the body stream. It is useful when retries happen and we need to rewind the stream. + ''' + + def __init__(self): + self.request = None + self.response = None + self.location_mode = None + self.exception = None + self.is_emulated = False + self.body_position = None + + +class LocationMode(object): + ''' + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + ''' + + PRIMARY = 'primary' + ''' Requests should be sent to the primary location. ''' + + SECONDARY = 'secondary' + ''' Requests should be sent to the secondary location, if possible. ''' + + +class RetentionPolicy(object): + ''' + By default, Storage Analytics will not delete any logging or metrics data. Blobs + will continue to be written until the shared 20TB limit is + reached. Once the 20TB limit is reached, Storage Analytics will stop writing + new data and will not resume until free space is available. This 20TB limit + is independent of the total limit for your storage account. + + There are two ways to delete Storage Analytics data: by manually making deletion + requests or by setting a data retention policy. 
Manual requests to delete Storage + Analytics data are billable, but delete requests resulting from a retention policy + are not billable. + ''' + + def __init__(self, enabled=False, days=None): + ''' + :param bool enabled: + Indicates whether a retention policy is enabled for the + storage service. If disabled, logging and metrics data will be retained + infinitely by the service unless explicitly deleted. + :param int days: + Required if enabled is true. Indicates the number of + days that metrics or logging data should be retained. All data older + than this value will be deleted. The minimum value you can specify is 1; + the largest value is 365 (one year). + ''' + _validate_not_none("enabled", enabled) + if enabled: + _validate_not_none("days", days) + + self.enabled = enabled + self.days = days + + +class Logging(object): + ''' + Storage Analytics logs detailed information about successful and failed requests + to a storage service. This information can be used to monitor individual requests + and to diagnose issues with a storage service. Requests are logged on a best-effort + basis. + + All logs are stored in block blobs in a container named $logs, which is + automatically created when Storage Analytics is enabled for a storage account. + The $logs container is located in the blob namespace of the storage account. + This container cannot be deleted once Storage Analytics has been enabled, though + its contents can be deleted. + + For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343262.aspx + ''' + + def __init__(self, delete=False, read=False, write=False, + retention_policy=None): + ''' + :param bool delete: + Indicates whether all delete requests should be logged. + :param bool read: + Indicates whether all read requests should be logged. + :param bool write: + Indicates whether all write requests should be logged. + :param RetentionPolicy retention_policy: + The retention policy for the metrics. 
+ ''' + _validate_not_none("read", read) + _validate_not_none("write", write) + _validate_not_none("delete", delete) + + self.version = u'1.0' + self.delete = delete + self.read = read + self.write = write + self.retention_policy = retention_policy if retention_policy else RetentionPolicy() + + +class Metrics(object): + ''' + Metrics include aggregated transaction statistics and capacity data about requests + to a storage service. Transactions are reported at both the API operation level + as well as at the storage service level, and capacity is reported at the storage + service level. Metrics data can be used to analyze storage service usage, diagnose + issues with requests made against the storage service, and to improve the + performance of applications that use a service. + + For more information, see https://msdn.microsoft.com/en-us/library/azure/hh343258.aspx + ''' + + def __init__(self, enabled=False, include_apis=None, + retention_policy=None): + ''' + :param bool enabled: + Indicates whether metrics are enabled for + the service. + :param bool include_apis: + Required if enabled is True. Indicates whether metrics + should generate summary statistics for called API operations. + :param RetentionPolicy retention_policy: + The retention policy for the metrics. + ''' + _validate_not_none("enabled", enabled) + if enabled: + _validate_not_none("include_apis", include_apis) + + self.version = u'1.0' + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy if retention_policy else RetentionPolicy() + + +class CorsRule(object): + ''' + CORS is an HTTP feature that enables a web application running under one domain + to access resources in another domain. Web browsers implement a security + restriction known as same-origin policy that prevents a web page from calling + APIs in a different domain; CORS provides a secure way to allow one domain + (the origin domain) to call APIs in another domain. 
+ + For more information, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx + ''' + + def __init__(self, allowed_origins, allowed_methods, max_age_in_seconds=0, + exposed_headers=None, allowed_headers=None): + ''' + :param allowed_origins: + A list of origin domains that will be allowed via CORS, or "*" to allow + all domains. The list of must contain at least one entry. Limited to 64 + origin domains. Each allowed origin can have up to 256 characters. + :type allowed_origins: list(str) + :param allowed_methods: + A list of HTTP methods that are allowed to be executed by the origin. + The list of must contain at least one entry. For Azure Storage, + permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. + :type allowed_methods: list(str) + :param int max_age_in_seconds: + The number of seconds that the client/browser should cache a + preflight response. + :param exposed_headers: + Defaults to an empty list. A list of response headers to expose to CORS + clients. Limited to 64 defined headers and two prefixed headers. Each + header can be up to 256 characters. + :type exposed_headers: list(str) + :param allowed_headers: + Defaults to an empty list. A list of headers allowed to be part of + the cross-origin request. Limited to 64 defined headers and 2 prefixed + headers. Each header can be up to 256 characters. 
+ :type allowed_headers: list(str) + ''' + _validate_not_none("allowed_origins", allowed_origins) + _validate_not_none("allowed_methods", allowed_methods) + _validate_not_none("max_age_in_seconds", max_age_in_seconds) + + self.allowed_origins = allowed_origins if allowed_origins else list() + self.allowed_methods = allowed_methods if allowed_methods else list() + self.max_age_in_seconds = max_age_in_seconds + self.exposed_headers = exposed_headers if exposed_headers else list() + self.allowed_headers = allowed_headers if allowed_headers else list() + + +class DeleteRetentionPolicy(object): + ''' + To set DeleteRetentionPolicy, you must call Set Blob Service Properties using version 2017-07-29 or later. + This class groups the settings related to delete retention policy. + ''' + + def __init__(self, enabled=False, days=None): + ''' + :param bool enabled: + Required. Indicates whether a deleted blob or snapshot is retained or immediately removed by delete operation. + :param int days: + Required only if Enabled is true. Indicates the number of days that deleted blob be retained. + All data older than this value will be permanently deleted. + The minimum value you can specify is 1; the largest value is 365. + ''' + _validate_not_none("enabled", enabled) + if enabled: + _validate_not_none("days", days) + + self.enabled = enabled + self.days = days + + +class StaticWebsite(object): + ''' + Class representing the service properties pertaining to static websites. + To set StaticWebsite, you must call Set Blob Service Properties using version 2018-03-28 or later. + ''' + + def __init__(self, enabled=False, index_document=None, error_document_404_path=None): + ''' + :param bool enabled: + Required. True if static websites should be enabled on the blob service for the corresponding Storage Account. + :param str index_document: + Represents the name of the index document. This is commonly "index.html". 
+ :param str error_document_404_path: + Represents the path to the error document that should be shown when an error 404 is issued, + in other words, when a browser requests a page that does not exist. + ''' + _validate_not_none("enabled", enabled) + + self.enabled = enabled + self.index_document = index_document + self.error_document_404_path = error_document_404_path + + +class ServiceProperties(object): + ''' + Returned by get_*_service_properties functions. Contains the properties of a + storage service, including Analytics and CORS rules. + + Azure Storage Analytics performs logging and provides metrics data for a storage + account. You can use this data to trace requests, analyze usage trends, and + diagnose issues with your storage account. To use Storage Analytics, you must + enable it individually for each service you want to monitor. + + The aggregated data is stored in a well-known blob (for logging) and in well-known + tables (for metrics), which may be accessed using the Blob service and Table + service APIs. + + For an in-depth guide on using Storage Analytics and other tools to identify, + diagnose, and troubleshoot Azure Storage-related issues, see + http://azure.microsoft.com/documentation/articles/storage-monitoring-diagnosing-troubleshooting/ + + For more information on CORS, see https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx + ''' + + pass + + +class ServiceStats(object): + ''' + Returned by get_*_service_stats functions. Contains statistics related to + replication for the given service. It is only available when read-access + geo-redundant replication is enabled for the storage account. + + :ivar GeoReplication geo_replication: + An object containing statistics related to replication for the given service. + ''' + pass + + +class GeoReplication(object): + ''' + Contains statistics related to replication for the given service. + + :ivar str status: + The status of the secondary location. 
Possible values are: + live: Indicates that the secondary location is active and operational. + bootstrap: Indicates initial synchronization from the primary location + to the secondary location is in progress. This typically occurs + when replication is first enabled. + unavailable: Indicates that the secondary location is temporarily + unavailable. + :ivar date last_sync_time: + A GMT date value, to the second. All primary writes preceding this value + are guaranteed to be available for read operations at the secondary. + Primary writes after this point in time may or may not be available for + reads. The value may be empty if LastSyncTime is not available. This can + happen if the replication status is bootstrap or unavailable. Although + geo-replication is continuously enabled, the LastSyncTime result may + reflect a cached value from the service that is refreshed every few minutes. + ''' + pass + + +class AccessPolicy(object): + ''' + Access Policy class used by the set and get acl methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. + + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. 
Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + ''' + + def __init__(self, permission=None, expiry=None, start=None): + ''' + :param str permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + ''' + self.start = start + self.expiry = expiry + self.permission = permission + + +class Protocol(object): + ''' + Specifies the protocol permitted for a SAS token. Note that HTTP only is + not allowed. + ''' + + HTTPS = 'https' + ''' Allow HTTPS requests only. ''' + + HTTPS_HTTP = 'https,http' + ''' Allow HTTP and HTTPS requests. ''' + + +class ResourceTypes(object): + ''' + Specifies the resource types that are accessible with the account SAS. 
+ + :ivar ResourceTypes ResourceTypes.CONTAINER: + Access to container-level APIs (e.g., Create/Delete Container, + Create/Delete Queue, Create/Delete Share, + List Blobs/Files and Directories) + :ivar ResourceTypes ResourceTypes.OBJECT: + Access to object-level APIs for blobs, queue messages, and + files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) + :ivar ResourceTypes ResourceTypes.SERVICE: + Access to service-level APIs (e.g., Get/Set Service Properties, + Get Service Stats, List Containers/Queues/Shares) + ''' + + def __init__(self, service=False, container=False, object=False, _str=None): + ''' + :param bool service: + Access to service-level APIs (e.g., Get/Set Service Properties, + Get Service Stats, List Containers/Queues/Shares) + :param bool container: + Access to container-level APIs (e.g., Create/Delete Container, + Create/Delete Queue, Create/Delete Share, + List Blobs/Files and Directories) + :param bool object: + Access to object-level APIs for blobs, queue messages, and + files(e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) + :param str _str: + A string representing the resource types. + ''' + if not _str: + _str = '' + self.service = service or ('s' in _str) + self.container = container or ('c' in _str) + self.object = object or ('o' in _str) + + def __or__(self, other): + return ResourceTypes(_str=str(self) + str(other)) + + def __add__(self, other): + return ResourceTypes(_str=str(self) + str(other)) + + def __str__(self): + return (('s' if self.service else '') + + ('c' if self.container else '') + + ('o' if self.object else '')) + + +ResourceTypes.SERVICE = ResourceTypes(service=True) +ResourceTypes.CONTAINER = ResourceTypes(container=True) +ResourceTypes.OBJECT = ResourceTypes(object=True) + + +class Services(object): + ''' + Specifies the services accessible with the account SAS. + + :ivar Services Services.BLOB: The blob service. 
+ :ivar Services Services.FILE: The file service + :ivar Services Services.QUEUE: The queue service. + :ivar Services Services.TABLE: The table service. + ''' + + def __init__(self, blob=False, queue=False, file=False, table=False, _str=None): + ''' + :param bool blob: + Access to any blob service, for example, the `.BlockBlobService` + :param bool queue: + Access to the `.QueueService` + :param bool file: + Access to the `.FileService` + :param bool table: + Access to the TableService + :param str _str: + A string representing the services. + ''' + if not _str: + _str = '' + self.blob = blob or ('b' in _str) + self.queue = queue or ('q' in _str) + self.file = file or ('f' in _str) + self.table = table or ('t' in _str) + + def __or__(self, other): + return Services(_str=str(self) + str(other)) + + def __add__(self, other): + return Services(_str=str(self) + str(other)) + + def __str__(self): + return (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('t' if self.table else '') + + ('f' if self.file else '')) + + +Services.BLOB = Services(blob=True) +Services.QUEUE = Services(queue=True) +Services.TABLE = Services(table=True) +Services.FILE = Services(file=True) + + +class AccountPermissions(object): + ''' + :class:`~ResourceTypes` class to be used with generate_shared_access_signature + method and for the AccessPolicies used with set_*_acl. There are two types of + SAS which may be used to grant resource access. One is to grant access to a + specific resource (resource-specific). Another is to grant access to the + entire service for a specific account and allow certain operations based on + perms found here. + + :ivar AccountPermissions AccountPermissions.ADD: + Valid for the following Object resource types only: queue messages and append blobs. + :ivar AccountPermissions AccountPermissions.CREATE: + Valid for the following Object resource types only: blobs and files. 
Users + can create new blobs or files, but may not overwrite existing blobs or files. + :ivar AccountPermissions AccountPermissions.DELETE: + Valid for Container and Object resource types, except for queue messages. + :ivar AccountPermissions AccountPermissions.LIST: + Valid for Service and Container resource types only. + :ivar AccountPermissions AccountPermissions.PROCESS: + Valid for the following Object resource type only: queue messages. + :ivar AccountPermissions AccountPermissions.READ: + Valid for all signed resources types (Service, Container, and Object). + Permits read permissions to the specified resource type. + :ivar AccountPermissions AccountPermissions.UPDATE: + Valid for the following Object resource types only: queue messages. + :ivar AccountPermissions AccountPermissions.WRITE: + Valid for all signed resources types (Service, Container, and Object). + Permits write permissions to the specified resource type. + ''' + + def __init__(self, read=False, write=False, delete=False, list=False, + add=False, create=False, update=False, process=False, _str=None): + ''' + :param bool read: + Valid for all signed resources types (Service, Container, and Object). + Permits read permissions to the specified resource type. + :param bool write: + Valid for all signed resources types (Service, Container, and Object). + Permits write permissions to the specified resource type. + :param bool delete: + Valid for Container and Object resource types, except for queue messages. + :param bool list: + Valid for Service and Container resource types only. + :param bool add: + Valid for the following Object resource types only: queue messages, and append blobs. + :param bool create: + Valid for the following Object resource types only: blobs and files. + Users can create new blobs or files, but may not overwrite existing + blobs or files. + :param bool update: + Valid for the following Object resource types only: queue messages. 
+ :param bool process: + Valid for the following Object resource type only: queue messages. + :param str _str: + A string representing the permissions. + ''' + if not _str: + _str = '' + self.read = read or ('r' in _str) + self.write = write or ('w' in _str) + self.delete = delete or ('d' in _str) + self.list = list or ('l' in _str) + self.add = add or ('a' in _str) + self.create = create or ('c' in _str) + self.update = update or ('u' in _str) + self.process = process or ('p' in _str) + + def __or__(self, other): + return AccountPermissions(_str=str(self) + str(other)) + + def __add__(self, other): + return AccountPermissions(_str=str(self) + str(other)) + + def __str__(self): + return (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '')) + + +AccountPermissions.READ = AccountPermissions(read=True) +AccountPermissions.WRITE = AccountPermissions(write=True) +AccountPermissions.DELETE = AccountPermissions(delete=True) +AccountPermissions.LIST = AccountPermissions(list=True) +AccountPermissions.ADD = AccountPermissions(add=True) +AccountPermissions.CREATE = AccountPermissions(create=True) +AccountPermissions.UPDATE = AccountPermissions(update=True) +AccountPermissions.PROCESS = AccountPermissions(process=True) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py new file mode 100644 index 000000000000..85764430259d --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py @@ -0,0 +1,306 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from abc import ABCMeta +from math import pow +import random +from io import (SEEK_SET, UnsupportedOperation) + +from .models import LocationMode +from ._constants import ( + DEV_ACCOUNT_NAME, + DEV_ACCOUNT_SECONDARY_NAME +) + + +class _Retry(object): + ''' + The base class for Exponential and Linear retries containing shared code. + ''' + __metaclass__ = ABCMeta + + def __init__(self, max_attempts, retry_to_secondary): + ''' + Constructs a base retry object. + + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + ''' + self.max_attempts = max_attempts + self.retry_to_secondary = retry_to_secondary + + def _should_retry(self, context): + ''' + A function which determines whether or not to retry. + + :param ~azure.storage.models.RetryContext context: + The retry context. This contains the request, response, and other data + which can be used to determine whether or not to retry. + :return: + A boolean indicating whether or not to retry the request. + :rtype: bool + ''' + # If max attempts are reached, do not retry. + if context.count >= self.max_attempts: + return False + + status = None + if context.response and context.response.status: + status = context.response.status + + if status is None: + ''' + If status is None, retry as this request triggered an exception. For + example, network issues would trigger this. + ''' + return True + elif 200 <= status < 300: + ''' + This method is called after a successful response, meaning we failed + during the response body download or parsing. So, success codes should + be retried. 
+ ''' + return True + elif 300 <= status < 500: + ''' + An exception occured, but in most cases it was expected. Examples could + include a 309 Conflict or 412 Precondition Failed. + ''' + if status == 404 and context.location_mode == LocationMode.SECONDARY: + # Response code 404 should be retried if secondary was used. + return True + if status == 408: + # Response code 408 is a timeout and should be retried. + return True + return False + elif status >= 500: + ''' + Response codes above 500 with the exception of 501 Not Implemented and + 505 Version Not Supported indicate a server issue and should be retried. + ''' + if status == 501 or status == 505: + return False + return True + else: + # If something else happened, it's unexpected. Retry. + return True + + def _set_next_host_location(self, context): + ''' + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
+ ''' + if len(context.request.host_locations) > 1: + # If there's more than one possible location, retry to the alternative + if context.location_mode == LocationMode.PRIMARY: + context.location_mode = LocationMode.SECONDARY + + # if targeting the emulator (with path style), change path instead of host + if context.is_emulated: + # replace the first instance of primary account name with the secondary account name + context.request.path = context.request.path.replace(DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME, 1) + else: + context.request.host = context.request.host_locations.get(context.location_mode) + else: + context.location_mode = LocationMode.PRIMARY + + # if targeting the emulator (with path style), change path instead of host + if context.is_emulated: + # replace the first instance of secondary account name with the primary account name + context.request.path = context.request.path.replace(DEV_ACCOUNT_SECONDARY_NAME, DEV_ACCOUNT_NAME, 1) + else: + context.request.host = context.request.host_locations.get(context.location_mode) + + def _retry(self, context, backoff): + ''' + A function which determines whether and how to retry. + + :param ~azure.storage.models.RetryContext context: + The retry context. This contains the request, response, and other data + which can be used to determine whether or not to retry. + :param function() backoff: + A function which returns the backoff time if a retry is to be performed. + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + ''' + # If the context does not contain a count parameter, this request has not + # been retried yet. Add the count parameter to track the number of retries. + if not hasattr(context, 'count'): + context.count = 0 + + # Determine whether to retry, and if so increment the count, modify the + # request as desired, and return the backoff. 
+ if self._should_retry(context): + backoff_interval = backoff(context) + context.count += 1 + + # If retry to secondary is enabled, attempt to change the host if the + # request allows it + if self.retry_to_secondary: + self._set_next_host_location(context) + + # rewind the request body if it is a stream + if hasattr(context.request.body, 'read'): + # no position was saved, then retry would not work + if context.body_position is None: + return None + else: + try: + # attempt to rewind the body to the initial position + context.request.body.seek(context.body_position, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + return None + + return backoff_interval + + return None + + +class ExponentialRetry(_Retry): + ''' + Exponential retry. + ''' + + def __init__(self, initial_backoff=15, increment_base=3, max_attempts=3, + retry_to_secondary=False, random_jitter_range=3): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. 
+ ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__(max_attempts, retry_to_secondary) + + ''' + A function which determines whether and how to retry. + + :param ~azure.storage.models.RetryContext context: + The retry context. This contains the request, response, and other data + which can be used to determine whether or not to retry. + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + ''' + + def retry(self, context): + return self._retry(context, self._backoff) + + ''' + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + ''' + + def _backoff(self, context): + random_generator = random.Random() + backoff = self.initial_backoff + (0 if context.count == 0 else pow(self.increment_base, context.count)) + random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 + random_range_end = backoff + self.random_jitter_range + return random_generator.uniform(random_range_start, random_range_end) + + +class LinearRetry(_Retry): + ''' + Linear retry. + ''' + + def __init__(self, backoff=15, max_attempts=3, retry_to_secondary=False, random_jitter_range=3): + ''' + Constructs a Linear retry object. + + :param int backoff: + The backoff interval, in seconds, between retries. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. 
+ For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.backoff = backoff + self.max_attempts = max_attempts + self.random_jitter_range = random_jitter_range + super(LinearRetry, self).__init__(max_attempts, retry_to_secondary) + + ''' + A function which determines whether and how to retry. + + :param ~azure.storage.models.RetryContext context: + The retry context. This contains the request, response, and other data + which can be used to determine whether or not to retry. + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + ''' + + def retry(self, context): + return self._retry(context, self._backoff) + + ''' + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + ''' + + def _backoff(self, context): + random_generator = random.Random() + # the backoff interval normally does not change, however there is the possibility + # that it was modified by accessing the property directly after initializing the object + self.random_range_start = self.backoff - self.random_jitter_range if self.backoff > self.random_jitter_range else 0 + self.random_range_end = self.backoff + self.random_jitter_range + return random_generator.uniform(self.random_range_start, self.random_range_end) + + +def no_retry(context): + ''' + Specifies never to retry. + + :param ~azure.storage.models.RetryContext context: + The retry context. + :return: + Always returns None to indicate never to retry. 
+ :rtype: None + ''' + return None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py new file mode 100644 index 000000000000..c23201a85bcf --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py @@ -0,0 +1,217 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from datetime import date + +from ._common_conversion import ( + _sign_string, + _to_str, +) +from ._constants import DEFAULT_X_MS_VERSION +from ._serialization import ( + url_quote, + _to_utc_datetime, +) + + +class SharedAccessSignature(object): + ''' + Provides a factory for creating account access + signature tokens with an account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key, x_ms_version=DEFAULT_X_MS_VERSION): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shares access signatures. + :param str x_ms_version: + The service version used to generate the shared access signatures. + ''' + self.account_name = account_name + self.account_key = account_key + self.x_ms_version = x_ms_version + + def generate_account(self, services, resource_types, permission, expiry, start=None, + ip=None, protocol=None): + ''' + Generates a shared access signature for the account. 
+ Use the returned signature with the sas_token parameter of the service + or to create a new account object. + + :param Services services: + Specifies the services accessible with the account SAS. You can + combine values to provide access to more than one service. + :param ResourceTypes resource_types: + Specifies the resource types that are accessible with the account + SAS. You can combine values to provide access to more than one + resource type. + :param AccountPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. 
+ For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _QueryStringConstants(object): + SIGNED_SIGNATURE = 'sig' + SIGNED_PERMISSION = 'sp' + SIGNED_START = 'st' + SIGNED_EXPIRY = 'se' + SIGNED_RESOURCE = 'sr' + SIGNED_IDENTIFIER = 'si' + SIGNED_IP = 'sip' + SIGNED_PROTOCOL = 'spr' + SIGNED_VERSION = 'sv' + SIGNED_CACHE_CONTROL = 'rscc' + SIGNED_CONTENT_DISPOSITION = 'rscd' + SIGNED_CONTENT_ENCODING = 'rsce' + SIGNED_CONTENT_LANGUAGE = 'rscl' + SIGNED_CONTENT_TYPE = 'rsct' + START_PK = 'spk' + START_RK = 'srk' + END_PK = 'epk' + END_RK = 'erk' + SIGNED_RESOURCE_TYPES = 'srt' + SIGNED_SERVICES = 'ss' + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _to_str(val) + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(_QueryStringConstants.SIGNED_START, start) + self._add_query(_QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(_QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(_QueryStringConstants.SIGNED_IP, ip) + self._add_query(_QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(_QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(_QueryStringConstants.SIGNED_RESOURCE, resource) 
+ + def add_id(self, id): + self._add_query(_QueryStringConstants.SIGNED_IDENTIFIER, id) + + def add_account(self, services, resource_types): + self._add_query(_QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(_QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(_QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(_QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_resource_signature(self, account_name, account_key, service, path): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/' + service + '/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. 
+ string_to_sign = \ + (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(_QueryStringConstants.SIGNED_START) + + get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource + + get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) + + get_value_to_append(_QueryStringConstants.SIGNED_IP) + + get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) + + if service == 'blob' or service == 'file': + string_to_sign += \ + (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) + + get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) + + get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, + _sign_string(account_key, string_to_sign)) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(_QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(_QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(_QueryStringConstants.SIGNED_START) + + get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(_QueryStringConstants.SIGNED_IP) + + get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) + + self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, + _sign_string(account_key, string_to_sign)) + + def get_token(self): + return 
'&'.join(['{0}={1}'.format(n, url_quote(v)) for n, v in self.query_dict.items() if v is not None]) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py new file mode 100644 index 000000000000..859a729df466 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py @@ -0,0 +1,391 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +from abc import ABCMeta +import logging + +logger = logging.getLogger(__name__) +from time import sleep + +import requests +from azure.common import ( + AzureException, + AzureHttpError, +) + +from ._constants import ( + DEFAULT_SOCKET_TIMEOUT, + DEFAULT_X_MS_VERSION, + DEFAULT_USER_AGENT_STRING, + USER_AGENT_STRING_PREFIX, + USER_AGENT_STRING_SUFFIX, +) +from ._error import ( + _ERROR_DECRYPTION_FAILURE, + _http_error_handler, +) +from ._http import HTTPError +from ._http.httpclient import _HTTPClient +from ._serialization import ( + _update_request, + _add_date_header, +) +from .models import ( + RetryContext, + LocationMode, + _OperationContext, +) +from .retry import ExponentialRetry +from io import UnsupportedOperation + + +class StorageClient(object): + ''' + This is the base class for service objects. Service objects are used to do + all requests to Storage. This class cannot be instantiated directly. + + :ivar str account_name: + The storage account name. This is used to authenticate requests + signed with an account key and to construct the storage endpoint. 
It + is required unless a connection string is given, or if a custom + domain is used with anonymous authentication. + :ivar str account_key: + The storage account key. This is used for shared key authentication. + If neither account key or sas token is specified, anonymous access + will be used. + :ivar str sas_token: + A shared access signature token to use to authenticate requests + instead of the account key. If account key and sas token are both + specified, account key will be used to sign. If neither are + specified, anonymous access will be used. + :ivar str primary_endpoint: + The endpoint to send storage requests to. + :ivar str secondary_endpoint: + The secondary endpoint to read storage data from. This will only be a + valid endpoint if the storage account used is RA-GRS and thus allows + reading from secondary. + :ivar function(context) retry: + A function which determines whether to retry. Takes as a parameter a + :class:`~azure.storage.common.models.RetryContext` object. Returns the number + of seconds to wait before retrying the request, or None to indicate not + to retry. + :ivar ~azure.storage.common.models.LocationMode location_mode: + The host location to use to make requests. Defaults to LocationMode.PRIMARY. + Note that this setting only applies to RA-GRS accounts as other account + types do not allow reading from secondary. If the location_mode is set to + LocationMode.SECONDARY, read requests will be sent to the secondary endpoint. + Write requests will continue to be sent to primary. + :ivar str protocol: + The protocol to use for requests. Defaults to https. + :ivar requests.Session request_session: + The session object to use for http requests. + :ivar function(request) request_callback: + A function called immediately before each request is sent. This function + takes as a parameter the request object and returns nothing. It may be + used to added custom headers or log request data. 
+ :ivar function() response_callback: + A function called immediately after each response is received. This + function takes as a parameter the response object and returns nothing. + It may be used to log response data. + :ivar function() retry_callback: + A function called immediately after retry evaluation is performed. This + function takes as a parameter the retry context object and returns nothing. + It may be used to detect retries and log context information. + ''' + + __metaclass__ = ABCMeta + + def __init__(self, connection_params): + ''' + :param obj connection_params: The parameters to use to construct the client. + ''' + self.account_name = connection_params.account_name + self.account_key = connection_params.account_key + self.sas_token = connection_params.sas_token + self.token_credential = connection_params.token_credential + self.is_emulated = connection_params.is_emulated + + self.primary_endpoint = connection_params.primary_endpoint + self.secondary_endpoint = connection_params.secondary_endpoint + + protocol = connection_params.protocol + request_session = connection_params.request_session or requests.Session() + socket_timeout = connection_params.socket_timeout or DEFAULT_SOCKET_TIMEOUT + self._httpclient = _HTTPClient( + protocol=protocol, + session=request_session, + timeout=socket_timeout, + ) + + self.retry = ExponentialRetry().retry + self.location_mode = LocationMode.PRIMARY + + self.request_callback = None + self.response_callback = None + self.retry_callback = None + self._X_MS_VERSION = DEFAULT_X_MS_VERSION + self._USER_AGENT_STRING = DEFAULT_USER_AGENT_STRING + + def _update_user_agent_string(self, service_package_version): + self._USER_AGENT_STRING = '{}{} {}'.format(USER_AGENT_STRING_PREFIX, + service_package_version, + USER_AGENT_STRING_SUFFIX) + + @property + def socket_timeout(self): + return self._httpclient.timeout + + @socket_timeout.setter + def socket_timeout(self, value): + self._httpclient.timeout = value + + @property + 
def protocol(self): + return self._httpclient.protocol + + @protocol.setter + def protocol(self, value): + self._httpclient.protocol = value + + @property + def request_session(self): + return self._httpclient.session + + @request_session.setter + def request_session(self, value): + self._httpclient.session = value + + def set_proxy(self, host, port, user=None, password=None): + ''' + Sets the proxy server host and port for the HTTP CONNECT Tunnelling. + + :param str host: Address of the proxy. Ex: '192.168.0.100' + :param int port: Port of the proxy. Ex: 6000 + :param str user: User for proxy authorization. + :param str password: Password for proxy authorization. + ''' + self._httpclient.set_proxy(host, port, user, password) + + def _get_host_locations(self, primary=True, secondary=False): + locations = {} + if primary: + locations[LocationMode.PRIMARY] = self.primary_endpoint + if secondary: + locations[LocationMode.SECONDARY] = self.secondary_endpoint + return locations + + def _apply_host(self, request, operation_context, retry_context): + if operation_context.location_lock and operation_context.host_location: + # If this is a location locked operation and the location is set, + # override the request location and host_location. + request.host_locations = operation_context.host_location + request.host = list(operation_context.host_location.values())[0] + retry_context.location_mode = list(operation_context.host_location.keys())[0] + elif len(request.host_locations) == 1: + # If only one location is allowed, use that location. + request.host = list(request.host_locations.values())[0] + retry_context.location_mode = list(request.host_locations.keys())[0] + else: + # If multiple locations are possible, choose based on the location mode. 
+ request.host = request.host_locations.get(self.location_mode) + retry_context.location_mode = self.location_mode + + @staticmethod + def extract_date_and_request_id(retry_context): + if getattr(retry_context, 'response', None) is None: + return "" + resp = retry_context.response + + if 'date' in resp.headers and 'x-ms-request-id' in resp.headers: + return str.format("Server-Timestamp={0}, Server-Request-ID={1}", + resp.headers['date'], resp.headers['x-ms-request-id']) + elif 'date' in resp.headers: + return str.format("Server-Timestamp={0}", resp.headers['date']) + elif 'x-ms-request-id' in resp.headers: + return str.format("Server-Request-ID={0}", resp.headers['x-ms-request-id']) + else: + return "" + + def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None): + ''' + Sends the request and return response. Catches HTTPError and hands it + to error handler + ''' + operation_context = operation_context or _OperationContext() + retry_context = RetryContext() + retry_context.is_emulated = self.is_emulated + + # if request body is a stream, we need to remember its current position in case retries happen + if hasattr(request.body, 'read'): + try: + retry_context.body_position = request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + + # Apply the appropriate host based on the location mode + self._apply_host(request, operation_context, retry_context) + + # Apply common settings to the request + _update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING) + client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id']) + + while True: + try: + try: + # Execute the request callback + if self.request_callback: + self.request_callback(request) + + # Add date and auth after the callback so date doesn't get too old and + # authentication is still correct if signed headers are 
added in the request + # callback. This also ensures retry policies with long back offs + # will work as it resets the time sensitive headers. + _add_date_header(request) + + try: + # request can be signed individually + self.authentication.sign_request(request) + except AttributeError: + # session can also be signed + self.request_session = self.authentication.signed_session(self.request_session) + + # Set the request context + retry_context.request = request + + # Log the request before it goes out + logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.", + client_request_id_prefix, + request.method, + request.path, + request.query, + str(request.headers).replace('\n', '')) + + # Perform the request + response = self._httpclient.perform_request(request) + + # Execute the response callback + if self.response_callback: + self.response_callback(response) + + # Set the response context + retry_context.response = response + + # Log the response when it comes back + logger.info("%s Receiving Response: " + "%s, HTTP Status Code=%s, Message=%s, Headers=%s.", + client_request_id_prefix, + self.extract_date_and_request_id(retry_context), + response.status, + response.message, + str(response.headers).replace('\n', '')) + + # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException + if response.status >= 300: + # This exception will be caught by the general error handler + # and raised as an azure http exception + _http_error_handler( + HTTPError(response.status, response.message, response.headers, response.body)) + + # Parse the response + if parser: + if parser_args: + args = [response] + args.extend(parser_args) + return parser(*args) + else: + return parser(response) + else: + return + except AzureException as ex: + retry_context.exception = ex + raise ex + except Exception as ex: + retry_context.exception = ex + if sys.version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + raise 
AzureException(ex.args[0]) + else: + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + msg = "" + if len(ex.args) > 0: + msg = ex.args[0] + raise AzureException('{}: {}'.format(ex.__class__.__name__, msg)) + + except AzureException as ex: + # only parse the strings used for logging if logging is at least enabled for CRITICAL + if logger.isEnabledFor(logging.CRITICAL): + exception_str_in_one_line = str(ex).replace('\n', '') + status_code = retry_context.response.status if retry_context.response is not None else 'Unknown' + timestamp_and_request_id = self.extract_date_and_request_id(retry_context) + + # if the http error was expected, we should short-circuit + if isinstance(ex, AzureHttpError) and expected_errors is not None and ex.error_code in expected_errors: + logger.info("%s Received expected http error: " + "%s, HTTP status code=%s, Exception=%s.", + client_request_id_prefix, + timestamp_and_request_id, + status_code, + exception_str_in_one_line) + raise ex + + logger.info("%s Operation failed: checking if the operation should be retried. " + "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.", + client_request_id_prefix, + retry_context.count if hasattr(retry_context, 'count') else 0, + timestamp_and_request_id, + status_code, + exception_str_in_one_line) + + # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc) + # will not be resolved with retries. + if str(ex) == _ERROR_DECRYPTION_FAILURE: + logger.error("%s Encountered decryption failure: this cannot be retried. " + "%s, HTTP status code=%s, Exception=%s.", + client_request_id_prefix, + timestamp_and_request_id, + status_code, + exception_str_in_one_line) + raise ex + + # Determine whether a retry should be performed and if so, how + # long to wait before performing retry. 
+ retry_interval = self.retry(retry_context) + if retry_interval is not None: + # Execute the callback + if self.retry_callback: + self.retry_callback(retry_context) + + logger.info( + "%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.", + client_request_id_prefix, + retry_context.count, + retry_interval) + + # Sleep for the desired retry interval + sleep(retry_interval) + else: + logger.error("%s Retry policy did not allow for a retry: " + "%s, HTTP status code=%s, Exception=%s.", + client_request_id_prefix, + timestamp_and_request_id, + status_code, + exception_str_in_one_line) + raise ex + finally: + # If this is a location locked operation and the location is not set, + # this is the first request of that operation. Set the location to + # be used for subsequent requests in the operation. + if operation_context.location_lock and not operation_context.host_location: + # note: to cover the emulator scenario, the host_location is grabbed + # from request.host_locations(which includes the dev account name) + # instead of request.host(which at this point no longer includes the dev account name) + operation_context.host_location = { + retry_context.location_mode: request.host_locations[retry_context.location_mode]} diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/tokencredential.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/tokencredential.py new file mode 100644 index 000000000000..4d724ef06ad1 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/tokencredential.py @@ -0,0 +1,48 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import requests + + +class TokenCredential(object): + """ + Represents a token credential that is used to authorize HTTPS requests. + The token can be updated by the user. + + :ivar str token: + The authorization token. It can be set by the user at any point in a thread-safe way. + """ + + def __init__(self, initial_value=None): + """ + :param initial_value: initial value for the token. + """ + self.token = initial_value + + def signed_session(self, session=None): + """ + Sign requests session with the token. This method is called every time a request is going on the wire. + The user is responsible for updating the token with the preferred tool/SDK. + In general there are two options: + - override this method to update the token in a preferred way and set Authorization header on session + - not override this method, and have a timer that triggers periodically to update the token on this class + + The second option is recommended as it tends to be more performance-friendly. + + :param session: The session to configure for authentication + :type session: requests.Session + :rtype: requests.Session + """ + session = session or requests.Session() + session.headers['Authorization'] = "Bearer {}".format(self.token) + + return session + + def token(self, new_value): + """ + :param new_value: new value to be set as the token. 
+ """ + self.token = new_value \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index f3cc586d2bd2..15b2ec62da43 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -78,7 +78,9 @@ 'uamqp~=1.2.0', 'msrestazure>=0.4.32,<2.0.0', 'azure-common~=1.1', - 'azure-storage-blob~=1.3', + 'cryptography', + 'python-dateutil', + 'requests' # 'azure-core>=0.0.1', # will add back here and remove from dev_requirements.txt after azure core is released ], extras_require={ From 002a50f42640328b317ea6831b729675884c634f Mon Sep 17 00:00:00 2001 From: yijxie Date: Fri, 21 Jun 2019 18:04:05 -0700 Subject: [PATCH 37/54] Add vendored blob-storage required pkg --- sdk/eventhub/azure-eventhubs/setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/setup.py b/sdk/eventhub/azure-eventhubs/setup.py index 15b2ec62da43..15a77099e2c6 100644 --- a/sdk/eventhub/azure-eventhubs/setup.py +++ b/sdk/eventhub/azure-eventhubs/setup.py @@ -78,9 +78,9 @@ 'uamqp~=1.2.0', 'msrestazure>=0.4.32,<2.0.0', 'azure-common~=1.1', - 'cryptography', - 'python-dateutil', - 'requests' + 'python-dateutil>=2.8.0', + 'cryptography>=2.1.4', + 'requests>=2.18.4', # 'azure-core>=0.0.1', # will add back here and remove from dev_requirements.txt after azure core is released ], extras_require={ From cb5f979ccf9f7839e6292a6c29ce331cd4b86166 Mon Sep 17 00:00:00 2001 From: Yunhao Ling <47871814+yunhaoling@users.noreply.github.com> Date: Fri, 21 Jun 2019 21:41:54 -0700 Subject: [PATCH 38/54] Vendor storage 2.0.1 within EPH (#6031) --- .../eventprocessorhost/vendor/__init__.py | 2 +- .../vendor/storage/__init__.py | 2 +- .../vendor/storage/blob/_constants.py | 4 +- .../vendor/storage/blob/_deserialization.py | 106 +++++++++- .../vendor/storage/blob/_encryption.py | 4 +- .../vendor/storage/blob/_serialization.py | 43 +++- .../vendor/storage/blob/appendblobservice.py | 
122 ++++++++++- .../vendor/storage/blob/baseblobservice.py | 195 ++++++++++++++---- .../vendor/storage/blob/blockblobservice.py | 148 ++++++++++++- .../vendor/storage/blob/models.py | 50 ++++- .../vendor/storage/blob/pageblobservice.py | 132 +++++++++++- .../storage/blob/sharedaccesssignature.py | 115 ++++++++++- .../vendor/storage/common/__init__.py | 1 + .../vendor/storage/common/_auth.py | 38 ++-- .../vendor/storage/common/_connection.py | 7 +- .../vendor/storage/common/_constants.py | 6 +- .../vendor/storage/common/_error.py | 35 ++++ .../storage/common/cloudstorageaccount.py | 36 ++-- .../vendor/storage/common/retry.py | 2 +- .../storage/common/sharedaccesssignature.py | 37 ---- .../vendor/storage/common/storageclient.py | 93 +++++++-- .../vendor/vendor_azure_storage_version.md | 2 + 22 files changed, 1012 insertions(+), 168 deletions(-) create mode 100644 sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/vendor_azure_storage_version.md diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py index de40ea7ca058..0260537a02bb 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/__init__.py @@ -1 +1 @@ -__import__('pkg_resources').declare_namespace(__name__) +__path__ = __import__('pkgutil').extend_path(__path__, __name__) \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py index de40ea7ca058..0260537a02bb 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/__init__.py @@ -1 +1 @@ -__import__('pkg_resources').declare_namespace(__name__) +__path__ = 
__import__('pkgutil').extend_path(__path__, __name__) \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py index b450d83e430d..062a035662e3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_constants.py @@ -5,10 +5,10 @@ # -------------------------------------------------------------------------- __author__ = 'Microsoft Corp. ' -__version__ = '1.3.1' +__version__ = '2.0.1' # x-ms-version for storage service. -X_MS_VERSION = '2018-03-28' +X_MS_VERSION = '2018-11-09' # internal configurations, should not be changed _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_deserialization.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_deserialization.py index 3365ebfa726c..969f256b4a76 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_deserialization.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_deserialization.py @@ -10,7 +10,6 @@ from xml.etree import cElementTree as ETree except ImportError: from xml.etree import ElementTree as ETree - from ..common._common_conversion import ( _decode_base64_to_text, _to_str, @@ -37,6 +36,7 @@ ResourceProperties, BlobPrefix, AccountInformation, + UserDelegationKey, ) from ._encryption import _decrypt_blob from ..common.models import _list @@ -352,6 +352,77 @@ def _convert_xml_to_blob_list(response): return blob_list +def _convert_xml_to_blob_name_list(response): + ''' + + + string-value + string-value + int-value + string-value + + + blob-name + true + date-time-value + + date-time-value + etag + size-in-bytes + blob-content-type + + + + + 
sequence-number + BlockBlob|PageBlob|AppendBlob + locked|unlocked + available | leased | expired | breaking | broken + infinite | fixed + id + pending | success | aborted | failed + source url + bytes copied/bytes total + datetime + error string + P4 | P6 | P10 | P20 | P30 | P40 | P50 | P60 | Archive | Cool | Hot + date-time-value + true + datetime + int + date-time-value + + + value + + + + blob-prefix + + + + + ''' + if response is None or response.body is None: + return None + + blob_list = _list() + list_element = ETree.fromstring(response.body) + + setattr(blob_list, 'next_marker', list_element.findtext('NextMarker')) + + blobs_element = list_element.find('Blobs') + blob_prefix_elements = blobs_element.findall('BlobPrefix') + if blob_prefix_elements is not None: + for blob_prefix_element in blob_prefix_elements: + blob_list.append(blob_prefix_element.findtext('Name')) + + for blob_element in blobs_element.findall('Blob'): + blob_list.append(blob_element.findtext('Name')) + + return blob_list + + def _convert_xml_to_block_list(response): ''' @@ -450,3 +521,36 @@ def _parse_account_information(response): account_info.account_kind = response.headers['x-ms-account-kind'] return account_info + + +def _convert_xml_to_user_delegation_key(response): + """ + + + Guid + Guid + String, formatted ISO Date + String, formatted ISO Date + b + String, rest api version used to create delegation key + Ovg+o0K/0/2V8upg7AwlyAPCriEcOSXKuBu2Gv/PU70Y7aWDW3C2ZRmw6kYWqPWBaM1GosLkcSZkgsobAlT+Sw== + + + Converts xml response to UserDelegationKey class. 
+ """ + + if response is None or response.body is None: + return None + + delegation_key = UserDelegationKey() + + key_element = ETree.fromstring(response.body) + delegation_key.signed_oid = key_element.findtext('SignedOid') + delegation_key.signed_tid = key_element.findtext('SignedTid') + delegation_key.signed_start = key_element.findtext('SignedStart') + delegation_key.signed_expiry = key_element.findtext('SignedExpiry') + delegation_key.signed_service = key_element.findtext('SignedService') + delegation_key.signed_version = key_element.findtext('SignedVersion') + delegation_key.value = key_element.findtext('Value') + + return delegation_key diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_encryption.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_encryption.py index f1e9b540b0bf..757b49067475 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_encryption.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_encryption.py @@ -123,8 +123,8 @@ def _decrypt_blob(require_encryption, key_encryption_key, key_resolver, except: if require_encryption: raise ValueError(_ERROR_DATA_NOT_ENCRYPTED) - else: - return content + + return content if not (encryption_data.encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256): raise ValueError(_ERROR_UNSUPPORTED_ENCRYPTION_ALGORITHM) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_serialization.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_serialization.py index 100b40898561..611d73db5093 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_serialization.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/_serialization.py @@ -4,7 +4,7 @@ # license information. 
# -------------------------------------------------------------------------- from xml.sax.saxutils import escape as xml_escape - +from datetime import date try: from xml.etree import cElementTree as ETree except ImportError: @@ -13,6 +13,9 @@ _encode_base64, _str, ) +from ..common._serialization import ( + _to_utc_datetime, +) from ..common._error import ( _validate_not_none, _ERROR_START_END_NEEDED_FOR_MD5, @@ -46,7 +49,8 @@ def _get_path(container_name=None, blob_name=None): def _validate_and_format_range_headers(request, start_range, end_range, start_range_required=True, - end_range_required=True, check_content_md5=False, align_to_page=False): + end_range_required=True, check_content_md5=False, align_to_page=False, + range_header_name='x-ms-range'): # If end range is provided, start range must be provided if start_range_required or end_range is not None: _validate_not_none('start_range', start_range) @@ -63,9 +67,9 @@ def _validate_and_format_range_headers(request, start_range, end_range, start_ra # Format based on whether end_range is present request.headers = request.headers or {} if end_range is not None: - request.headers['x-ms-range'] = 'bytes={0}-{1}'.format(start_range, end_range) + request.headers[range_header_name] = 'bytes={0}-{1}'.format(start_range, end_range) elif start_range is not None: - request.headers['x-ms-range'] = "bytes={0}-".format(start_range) + request.headers[range_header_name] = "bytes={0}-".format(start_range) # Content MD5 can only be provided for a complete range less than 4MB in size if check_content_md5: @@ -116,3 +120,34 @@ def _convert_block_list_to_xml(block_id_list): # return xml value return output + + +def _convert_delegation_key_info_to_xml(start_time, expiry_time): + """ + + + String, formatted ISO Date + String, formatted ISO Date + + + Convert key info to xml to send. 
+ """ + if start_time is None or expiry_time is None: + raise ValueError("delegation key start/end times are required") + + key_info_element = ETree.Element('KeyInfo') + ETree.SubElement(key_info_element, 'Start').text = \ + _to_utc_datetime(start_time) if isinstance(start_time, date) else start_time + ETree.SubElement(key_info_element, 'Expiry').text = \ + _to_utc_datetime(expiry_time) if isinstance(expiry_time, date) else expiry_time + + # Add xml declaration and serialize + try: + stream = BytesIO() + ETree.ElementTree(key_info_element).write(stream, xml_declaration=True, encoding='utf-8', method='xml') + finally: + output = stream.getvalue() + stream.close() + + # return xml value + return output diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py index 8369cb3727e9..266852c21468 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/appendblobservice.py @@ -33,6 +33,7 @@ ) from ._serialization import ( _get_path, + _validate_and_format_range_headers, ) from ._upload_chunking import ( _AppendBlobChunkUploader, @@ -112,7 +113,7 @@ def __init__(self, account_name=None, account_key=None, sas_token=None, is_emula :param token_credential: A token credential used to authenticate HTTPS requests. The token value should be updated before its expiration. 
- :type `~..common.TokenCredential` + :type `~azure.storage.common.TokenCredential` ''' self.blob_type = _BlobTypes.AppendBlob super(AppendBlobService, self).__init__( @@ -286,6 +287,125 @@ def append_block(self, container_name, blob_name, block, return self._perform_request(request, _parse_append_block) + def append_block_from_url(self, container_name, blob_name, copy_source_url, source_range_start=None, + source_range_end=None, source_content_md5=None, source_if_modified_since=None, + source_if_unmodified_since=None, source_if_match=None, + source_if_none_match=None, maxsize_condition=None, + appendpos_condition=None, lease_id=None, if_modified_since=None, + if_unmodified_since=None, if_match=None, + if_none_match=None, timeout=None): + """ + Creates a new block to be committed as part of a blob, where the contents are read from a source url. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob. + :param str copy_source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int source_range_start: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + :param int source_range_end: + This indicates the end of the range of bytes(inclusive) that has to be taken from the copy source. + :param str source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :param datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :param datetime source_if_unmodified_since: + A DateTime value. 
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :param str source_if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the source resource's ETag matches the value specified. + :param str source_if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the source resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the source resource does not exist, and fail the + operation if it does exist. + :param int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :param int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :param str lease_id: + Required if the blob has an active lease. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ """ + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('copy_source_url', copy_source_url) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'appendblock', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-copy-source': copy_source_url, + 'x-ms-source-content-md5': source_content_md5, + 'x-ms-source-if-Modified-Since': _datetime_to_utc_string(source_if_modified_since), + 'x-ms-source-if-Unmodified-Since': _datetime_to_utc_string(source_if_unmodified_since), + 'x-ms-source-if-Match': _to_str(source_if_match), + 'x-ms-source-if-None-Match': _to_str(source_if_none_match), + 'x-ms-blob-condition-maxsize': _to_str(maxsize_condition), + 'x-ms-blob-condition-appendpos': _to_str(appendpos_condition), + 'x-ms-lease-id': _to_str(lease_id), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + + _validate_and_format_range_headers(request, source_range_start, source_range_end, + start_range_required=False, + end_range_required=False, + range_header_name="x-ms-source-range") + + return self._perform_request(request, _parse_append_block) + # ----Convenience APIs---------------------------------------------- def append_blob_from_path( diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py index adb9127ca5f5..e7ea8c7e6c73 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py +++ 
b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/baseblobservice.py @@ -37,6 +37,7 @@ _validate_decryption_required, _validate_access_policies, _ERROR_PARALLEL_NOT_SEEKABLE, + _validate_user_delegation_key, ) from ..common._http import HTTPRequest from ..common._serialization import ( @@ -50,7 +51,6 @@ ListGenerator, _OperationContext, ) - from .sharedaccesssignature import ( BlobSharedAccessSignature, ) @@ -59,12 +59,14 @@ _convert_xml_to_containers, _parse_blob, _convert_xml_to_blob_list, + _convert_xml_to_blob_name_list, _parse_container, _parse_snapshot_blob, _parse_lease, _convert_xml_to_signed_identifiers_and_access, _parse_base_properties, _parse_account_information, + _convert_xml_to_user_delegation_key, ) from ._download_chunking import _download_blob_chunks from ._error import ( @@ -74,6 +76,7 @@ from ._serialization import ( _get_path, _validate_and_format_range_headers, + _convert_delegation_key_info_to_xml, ) from .models import ( BlobProperties, @@ -189,7 +192,7 @@ def __init__(self, account_name=None, account_key=None, sas_token=None, is_emula :param token_credential: A token credential used to authenticate HTTPS requests. The token value should be updated before its expiration. - :type `~..common.TokenCredential` + :type `~azure.storage.common.TokenCredential` ''' service_params = _ServiceParameters.get_service_parameters( 'blob', @@ -327,7 +330,7 @@ def generate_account_shared_access_signature(self, resource_types, permission, restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. :return: A Shared Access Signature (sas) token. 
:rtype: str ''' @@ -343,7 +346,7 @@ def generate_container_shared_access_signature(self, container_name, start=None, id=None, ip=None, protocol=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, - content_type=None): + content_type=None, user_delegation_key=None): ''' Generates a shared access signature for the container. Use the returned signature with the sas_token parameter of any BlobService. @@ -384,7 +387,7 @@ def generate_container_shared_access_signature(self, container_name, restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value - is https,http. See :class:`~..common.models.Protocol` for possible values. + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. :param str cache_control: Response header value for Cache-Control when resource is accessed using this shared access signature. @@ -400,14 +403,24 @@ def generate_container_shared_access_signature(self, container_name, :param str content_type: Response header value for Content-Type when resource is accessed using this shared access signature. + :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling get_user_delegation_key. + When present, the SAS is signed with the user delegation key instead. :return: A Shared Access Signature (sas) token. 
:rtype: str ''' _validate_not_none('container_name', container_name) _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - sas = BlobSharedAccessSignature(self.account_name, self.account_key) + if user_delegation_key is not None: + _validate_user_delegation_key(user_delegation_key) + sas = BlobSharedAccessSignature(self.account_name, user_delegation_key=user_delegation_key) + else: + _validate_not_none('self.account_key', self.account_key) + sas = BlobSharedAccessSignature(self.account_name, account_key=self.account_key) + return sas.generate_container( container_name, permission, @@ -424,19 +437,22 @@ def generate_container_shared_access_signature(self, container_name, ) def generate_blob_shared_access_signature( - self, container_name, blob_name, permission=None, + self, container_name, blob_name, snapshot=None, permission=None, expiry=None, start=None, id=None, ip=None, protocol=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, - content_type=None): + content_type=None, user_delegation_key=None): ''' - Generates a shared access signature for the blob. + Generates a shared access signature for the blob or one of its snapshots. Use the returned signature with the sas_token parameter of any BlobService. :param str container_name: Name of container. :param str blob_name: Name of blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to grant permission. :param BlobPermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. @@ -470,7 +486,7 @@ def generate_blob_shared_access_signature( restricts the request to those IP addresses. :param str protocol: Specifies the protocol permitted for a request made. The default value - is https,http. 
See :class:`~..common.models.Protocol` for possible values. + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. :param str cache_control: Response header value for Cache-Control when resource is accessed using this shared access signature. @@ -486,20 +502,31 @@ def generate_blob_shared_access_signature( :param str content_type: Response header value for Content-Type when resource is accessed using this shared access signature. + :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling get_user_delegation_key. + When present, the SAS is signed with the user delegation key instead. :return: A Shared Access Signature (sas) token. :rtype: str ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('self.account_name', self.account_name) - _validate_not_none('self.account_key', self.account_key) - sas = BlobSharedAccessSignature(self.account_name, self.account_key) + if user_delegation_key is not None: + _validate_user_delegation_key(user_delegation_key) + sas = BlobSharedAccessSignature(self.account_name, user_delegation_key=user_delegation_key) + else: + _validate_not_none('self.account_key', self.account_key) + sas = BlobSharedAccessSignature(self.account_name, account_key=self.account_key) + return sas.generate_blob( - container_name, - blob_name, - permission, - expiry, + container_name=container_name, + blob_name=blob_name, + snapshot=snapshot, + permission=permission, + expiry=expiry, start=start, id=id, ip=ip, @@ -511,6 +538,33 @@ def generate_blob_shared_access_signature( content_type=content_type, ) + def get_user_delegation_key(self, key_start_time, key_expiry_time, timeout=None): + """ + Obtain a user delegation key for the purpose of 
signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :param int timeout: + The timeout parameter is expressed in seconds. + :return: + """ + _validate_not_none('key_start_time', key_start_time) + _validate_not_none('key_end_time', key_expiry_time) + + request = HTTPRequest() + request.method = 'POST' + request.host_locations = self._get_host_locations(secondary=True) + request.query = { + 'restype': 'service', + 'comp': 'userdelegationkey', + 'timeout': _int_to_str(timeout), + } + request.body = _get_request_body(_convert_delegation_key_info_to_xml(key_start_time, key_expiry_time)) + return self._perform_request(request, _convert_xml_to_user_delegation_key) + def list_containers(self, prefix=None, num_results=None, include_metadata=False, marker=None, timeout=None): ''' @@ -784,7 +838,7 @@ def set_container_acl(self, container_name, signed_identifiers=None, A dictionary of access policies to associate with the container. The dictionary may contain up to 5 elements. An empty dictionary will clear the access policies set on the service. - :type signed_identifiers: dict(str, :class:`~..common.models.AccessPolicy`) + :type signed_identifiers: dict(str, :class:`~azure.storage.common.models.AccessPolicy`) :param ~azure.storage.blob.models.PublicAccess public_access: Possible values include: container, blob. 
:param str lease_id: @@ -1244,14 +1298,66 @@ def list_blobs(self, container_name, prefix=None, num_results=None, include=None args = (container_name,) kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, 'include': include, 'delimiter': delimiter, 'timeout': timeout, - '_context': operation_context} + '_context': operation_context, + '_converter': _convert_xml_to_blob_list} + resp = self._list_blobs(*args, **kwargs) + + return ListGenerator(resp, self._list_blobs, args, kwargs) + + def list_blob_names(self, container_name, prefix=None, num_results=None, + include=None, delimiter=None, marker=None, + timeout=None): + ''' + Returns a generator to list the blob names under the specified container. + The generator will lazily follow the continuation tokens returned by + the service and stop when all blobs have been returned or num_results is reached. + + If num_results is specified and the account has more than that number of + blobs, the generator will have a populated next_marker field once it + finishes. This marker can be used to create a new generator if more + results are desired. + + :param str container_name: + Name of existing container. + :param str prefix: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param int num_results: + Specifies the maximum number of blobs to return, + including all :class:`BlobPrefix` elements. If the request does not specify + num_results or specifies a value greater than 5,000, the server will + return up to 5,000 items. Setting num_results to a value less than + or equal to zero results in error response code 400 (Bad Request). + :param ~azure.storage.blob.models.Include include: + Specifies one or more additional datasets to include in the response. 
+ :param str delimiter: + When the request includes this parameter, the operation + returns a :class:`~azure.storage.blob.models.BlobPrefix` element in the + result list that acts as a placeholder for all blobs whose names begin + with the same substring up to the appearance of the delimiter character. + The delimiter may be a single character or a string. + :param str marker: + An opaque continuation token. This value can be retrieved from the + next_marker field of a previous generator object if num_results was + specified and that generator has finished enumerating results. If + specified, this generator will begin returning results from the point + where the previous generator stopped. + :param int timeout: + The timeout parameter is expressed in seconds. + ''' + operation_context = _OperationContext(location_lock=True) + args = (container_name,) + kwargs = {'prefix': prefix, 'marker': marker, 'max_results': num_results, + 'include': include, 'delimiter': delimiter, 'timeout': timeout, + '_context': operation_context, + '_converter': _convert_xml_to_blob_name_list} resp = self._list_blobs(*args, **kwargs) return ListGenerator(resp, self._list_blobs, args, kwargs) def _list_blobs(self, container_name, prefix=None, marker=None, max_results=None, include=None, delimiter=None, timeout=None, - _context=None): + _context=None, _converter=None): ''' Returns the list of blobs under the specified container. @@ -1320,7 +1426,7 @@ def _list_blobs(self, container_name, prefix=None, marker=None, 'timeout': _int_to_str(timeout), } - return self._perform_request(request, _convert_xml_to_blob_list, operation_context=_context) + return self._perform_request(request, _converter, operation_context=_context) def get_blob_account_information(self, container_name=None, blob_name=None, timeout=None): """ @@ -1371,7 +1477,7 @@ def get_blob_service_stats(self, timeout=None): :param int timeout: The timeout parameter is expressed in seconds. :return: The blob service stats. 
- :rtype: :class:`~..common.models.ServiceStats` + :rtype: :class:`~azure.storage.common.models.ServiceStats` ''' request = HTTPRequest() request.method = 'GET' @@ -1396,22 +1502,22 @@ def set_blob_service_properties( :param logging: Groups the Azure Analytics Logging settings. :type logging: - :class:`~..common.models.Logging` + :class:`~azure.storage.common.models.Logging` :param hour_metrics: The hour metrics settings provide a summary of request statistics grouped by API in hourly aggregates for blobs. :type hour_metrics: - :class:`~..common.models.Metrics` + :class:`~azure.storage.common.models.Metrics` :param minute_metrics: The minute metrics settings provide request statistics for each minute for blobs. :type minute_metrics: - :class:`~..common.models.Metrics` + :class:`~azure.storage.common.models.Metrics` :param cors: You can include up to five CorsRule elements in the list. If an empty list is specified, all CORS rules will be deleted, and CORS will be disabled for the service. - :type cors: list(:class:`~..common.models.CorsRule`) + :type cors: list(:class:`~azure.storage.common.models.CorsRule`) :param str target_version: Indicates the default version to use for requests if an incoming request's version is not specified. @@ -1421,13 +1527,18 @@ def set_blob_service_properties( The delete retention policy specifies whether to retain deleted blobs. It also specifies the number of days and versions of blob to keep. :type delete_retention_policy: - :class:`~..common.models.DeleteRetentionPolicy` + :class:`~azure.storage.common.models.DeleteRetentionPolicy` :param static_website: Specifies whether the static website feature is enabled, and if yes, indicates the index document and 404 error document to use. 
:type static_website: - :class:`~..common.models.StaticWebsite` + :class:`~azure.storage.common.models.StaticWebsite` ''' + if all(parameter is None for parameter in [logging, hour_metrics, minute_metrics, cors, target_version, + delete_retention_policy, static_website]): + + raise ValueError("set_blob_service_properties should be called with at least one parameter") + request = HTTPRequest() request.method = 'PUT' request.host_locations = self._get_host_locations() @@ -1450,7 +1561,7 @@ def get_blob_service_properties(self, timeout=None): :param int timeout: The timeout parameter is expressed in seconds. - :return: The blob :class:`~..common.models.ServiceProperties` with an attached + :return: The blob :class:`~azure.storage.common.models.ServiceProperties` with an attached target_version property. ''' request = HTTPRequest() @@ -1977,11 +2088,11 @@ def get_blob_to_stream( if max_connections > 1: if sys.version_info >= (3,) and not stream.seekable(): raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) - else: - try: - stream.seek(stream.tell()) - except (NotImplementedError, AttributeError): - raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(_ERROR_PARALLEL_NOT_SEEKABLE) # The service only provides transactional MD5s for chunks under 4MB. # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first @@ -3071,7 +3182,7 @@ def copy_blob(self, container_name, blob_name, copy_source, destination_if_none_match, destination_lease_id, source_lease_id, timeout, - False) + False, False) def _copy_blob(self, container_name, blob_name, copy_source, metadata=None, @@ -3085,12 +3196,16 @@ def _copy_blob(self, container_name, blob_name, copy_source, destination_if_none_match=None, destination_lease_id=None, source_lease_id=None, timeout=None, - incremental_copy=False): + incremental_copy=False, + requires_sync=None): ''' See copy_blob for more details. 
This helper method - allows for standard copies as well as incremental copies which are only supported for page blobs. + allows for standard copies as well as incremental copies which are only supported for page blobs and sync + copies which are only supported for block blobs. :param bool incremental_copy: - The timeout parameter is expressed in seconds. + Performs an incremental copy operation on a page blob instead of a standard copy operation. + :param bool requires_sync: + Enforces that the service will not return a response until the copy is complete. ''' _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) @@ -3137,8 +3252,10 @@ def _copy_blob(self, container_name, blob_name, copy_source, 'If-None-Match': _to_str(destination_if_none_match), 'x-ms-lease-id': _to_str(destination_lease_id), 'x-ms-source-lease-id': _to_str(source_lease_id), - 'x-ms-access-tier': _to_str(premium_page_blob_tier) + 'x-ms-access-tier': _to_str(premium_page_blob_tier), + 'x-ms-requires-sync': _to_str(requires_sync) } + _add_metadata_headers(metadata, request) return self._perform_request(request, _parse_properties, [BlobProperties]).copy diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py index abd693974656..26900d3c6149 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/blockblobservice.py @@ -39,7 +39,6 @@ from ..common._serialization import ( _len_plus ) - from ._deserialization import ( _convert_xml_to_block_list, _parse_base_properties, @@ -51,6 +50,7 @@ from ._serialization import ( _convert_block_list_to_xml, _get_path, + _validate_and_format_range_headers, ) from ._upload_chunking import ( _BlockBlobChunkUploader, @@ -316,8 +316,9 @@ def 
get_block_list(self, container_name, blob_name, snapshot=None, return self._perform_request(request, _convert_xml_to_block_list) - def put_block_from_url(self, container_name, blob_name, copy_source_url, source_range_start, source_range_end, - block_id, source_content_md5=None, lease_id=None, timeout=None): + def put_block_from_url(self, container_name, blob_name, copy_source_url, block_id, + source_range_start=None, source_range_end=None, + source_content_md5=None, lease_id=None, timeout=None): """ Creates a new block to be committed as part of a blob. @@ -349,8 +350,6 @@ def put_block_from_url(self, container_name, blob_name, copy_source_url, source_ _validate_not_none('container_name', container_name) _validate_not_none('blob_name', blob_name) _validate_not_none('copy_source_url', copy_source_url) - _validate_not_none('source_range_start', source_range_start) - _validate_not_none('source_range_end', source_range_end) _validate_not_none('block_id', block_id) request = HTTPRequest() @@ -365,9 +364,16 @@ def put_block_from_url(self, container_name, blob_name, copy_source_url, source_ request.headers = { 'x-ms-lease-id': _to_str(lease_id), 'x-ms-copy-source': copy_source_url, - 'x-ms-source-range': 'bytes=' + _to_str(source_range_start) + '-' + _to_str(source_range_end), 'x-ms-source-content-md5': source_content_md5, } + _validate_and_format_range_headers( + request, + source_range_start, + source_range_end, + start_range_required=False, + end_range_required=False, + range_header_name="x-ms-source-range" + ) self._perform_request(request) @@ -881,6 +887,136 @@ def set_standard_blob_tier( self._perform_request(request) + def copy_blob(self, container_name, blob_name, copy_source, + metadata=None, source_if_modified_since=None, + source_if_unmodified_since=None, source_if_match=None, + source_if_none_match=None, destination_if_modified_since=None, + destination_if_unmodified_since=None, destination_if_match=None, + destination_if_none_match=None, 
destination_lease_id=None, + source_lease_id=None, timeout=None, requires_sync=None): + + ''' + Copies a blob. This operation returns a copy operation + properties object. The copy operation may be configured to either be an + asynchronous, best-effort operation, or a synchronous operation. + + The source must be a block blob if requires_sync is true. Any existing + destination blob will be overwritten. The destination blob cannot be + modified while a copy operation is in progress. + + When copying from a block blob, all committed blocks and their block IDs are + copied. Uncommitted blocks are not copied. At the end of the copy operation, + the destination blob will have the same committed block count as the source. + + You can call get_blob_properties on the destination blob to check the status + of the copy operation. The final blob will be committed when the copy completes. + + :param str container_name: + Name of the destination container. The container must exist. + :param str blob_name: + Name of the destination blob. If the destination blob exists, it will + be overwritten. Otherwise, it will be created. + :param str copy_source: + A URL of up to 2 KB in length that specifies an Azure file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :param metadata: + Name-value pairs associated with the blob as metadata. If no name-value + pairs are specified, the operation will copy the metadata from the + source blob or file to the destination blob. 
If one or more name-value + pairs are specified, the destination blob is created with the specified + metadata, and metadata is not copied from the source blob or file. + :type metadata: dict(str, str) + :param datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :param datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :param ETag source_if_match: + An ETag value, or the wildcard character (*). Specify this conditional + header to copy the source blob only if its ETag matches the value + specified. If the ETag values do not match, the Blob service returns + status code 412 (Precondition Failed). This header cannot be specified + if the source is an Azure File. + :param ETag source_if_none_match: + An ETag value, or the wildcard character (*). Specify this conditional + header to copy the blob only if its ETag does not match the value + specified. If the values are identical, the Blob service returns status + code 412 (Precondition Failed). This header cannot be specified if the + source is an Azure File. + :param datetime destination_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :param datetime destination_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :param ETag destination_if_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + matches the ETag value for an existing destination blob. If the ETag for + the destination blob does not match the ETag specified for If-Match, the + Blob service returns status code 412 (Precondition Failed). + :param ETag destination_if_none_match: + An ETag value, or the wildcard character (*). Specify an ETag value for + this conditional header to copy the blob only if the specified ETag value + does not match the ETag value for the destination blob. Specify the wildcard + character (*) to perform the operation only if the destination blob does not + exist. If the specified condition isn't met, the Blob service returns status + code 412 (Precondition Failed). + :param str destination_lease_id: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). 
+ :param str source_lease_id: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :param int timeout: + The timeout parameter is expressed in seconds. + :param bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :return: Copy operation properties such as status, source, and ID. + :rtype: :class:`~azure.storage.blob.models.CopyProperties` + ''' + + return self._copy_blob(container_name, blob_name, copy_source, + metadata, + premium_page_blob_tier=None, + source_if_modified_since=source_if_modified_since, + source_if_unmodified_since=source_if_unmodified_since, + source_if_match=source_if_match, + source_if_none_match=source_if_none_match, + destination_if_modified_since=destination_if_modified_since, + destination_if_unmodified_since=destination_if_unmodified_since, + destination_if_match=destination_if_match, + destination_if_none_match=destination_if_none_match, + destination_lease_id=destination_lease_id, + source_lease_id=source_lease_id, timeout=timeout, + incremental_copy=False, + requires_sync=requires_sync) + # -----Helper methods------------------------------------ def _put_blob(self, container_name, blob_name, blob, content_settings=None, metadata=None, validate_content=False, lease_id=None, if_modified_since=None, diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py index e39067aa3ac1..225d0f9370e8 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/models.py @@ -3,7 +3,6 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- - from ..common._common_conversion import _to_str @@ -670,13 +669,19 @@ class ContainerPermissions(object): a container SAS. Use an account SAS instead. ''' - def __init__(self, read=False, write=False, delete=False, list=False, + def __init__(self, read=False, add=False, create=False, write=False, delete=False, list=False, _str=None): ''' :param bool read: Read the content, properties, metadata or block list of any blob in the container. Use any blob in the container as the source of a copy operation. - :param bool write: + :param bool add: + Add a block to any append blob in the container. + :param bool create: + Write a new blob to the container, snapshot any blob in the container, or copy a blob to + a new blob in the container. Note: You cannot grant permissions to create a container + with a container SAS. Use an account SAS to create a container instead. + :param bool write: For any blob in the container, create or write content, properties, metadata, or block list. Snapshot or lease the blob. Resize the blob (page blob only). 
Use the blob as the destination of a copy operation @@ -694,6 +699,8 @@ def __init__(self, read=False, write=False, delete=False, list=False, if not _str: _str = '' self.read = read or ('r' in _str) + self.add = add or ('a' in _str) + self.create = create or ('c' in _str) self.write = write or ('w' in _str) self.delete = delete or ('d' in _str) self.list = list or ('l' in _str) @@ -706,6 +713,8 @@ def __add__(self, other): def __str__(self): return (('r' if self.read else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + ('w' if self.write else '') + ('d' if self.delete else '') + ('l' if self.list else '')) @@ -715,6 +724,8 @@ def __str__(self): ContainerPermissions.LIST = ContainerPermissions(list=True) ContainerPermissions.READ = ContainerPermissions(read=True) ContainerPermissions.WRITE = ContainerPermissions(write=True) +ContainerPermissions.ADD = ContainerPermissions(add=True) +ContainerPermissions.CREATE = ContainerPermissions(create=True) class PremiumPageBlobTier(object): @@ -779,3 +790,36 @@ class AccountInformation(object): def __init__(self): self.sku_name = None self.account_kind = None + + +class UserDelegationKey(object): + """ + Represents a user delegation key, provided to the user by Azure Storage + based on their Azure Active Directory access token. + + The fields are saved as simple strings since the user does not have to interact with this object; + to generate an identify SAS, the user can simply pass it to the right API. + + :ivar str signed_oid: + Object ID of this token. + :ivar str signed_tid: + Tenant ID of the tenant that issued this token. + :ivar str signed_start: + The datetime this token becomes valid. + :ivar str signed_expiry: + The datetime this token expires. + :ivar str signed_service: + What service this key is valid for. + :ivar str signed_version: + The version identifier of the REST service that created this token. + :ivar str value: + The user delegation key. 
+ """ + def __init__(self): + self.signed_oid = None + self.signed_tid = None + self.signed_start = None + self.signed_expiry = None + self.signed_service = None + self.signed_version = None + self.value = None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py index 476d55a49071..3c3217b285f3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/pageblobservice.py @@ -6,7 +6,6 @@ import sys from os import path - from ..common._common_conversion import ( _int_to_str, _to_str, @@ -29,7 +28,6 @@ _get_data_bytes_only, _add_metadata_headers, ) - from ._deserialization import ( _convert_xml_to_page_ranges, _parse_page_properties, @@ -382,6 +380,136 @@ def update_page( timeout=timeout ) + def update_page_from_url(self, container_name, blob_name, start_range, end_range, copy_source_url, + source_range_start, source_content_md5=None, source_if_modified_since=None, + source_if_unmodified_since=None, source_if_match=None, source_if_none_match=None, + lease_id=None, if_sequence_number_lte=None, if_sequence_number_lt=None, + if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, + if_match=None, if_none_match=None, timeout=None): + """ + Updates a range of pages to a page blob where the contents are read from a URL. + + :param str container_name: + Name of existing container. + :param str blob_name: + Name of blob. + :param int start_range: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. 
+ :param int end_range: + End of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the end offset must be a modulus of + 512-1. Examples of valid byte ranges are 0-511, 512-1023, etc. + :param str copy_source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int source_range_start: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + The service will read the same number of bytes as the destination range (end_range-start_range). + :param str source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :param datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :param datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :param str source_if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the source resource's ETag matches the value specified. + :param str source_if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the source resource's ETag does not match + the value specified. 
Specify the wildcard character (*) to perform + the operation only if the source resource does not exist, and fail the + operation if it does exist. + :param str lease_id: + Required if the blob has an active lease. + :param int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :param int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :param int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :param datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :param datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :param str if_match: + An ETag value, or the wildcard character (*). Specify this header to perform + the operation only if the resource's ETag matches the value specified. + :param str if_none_match: + An ETag value, or the wildcard character (*). Specify this header + to perform the operation only if the resource's ETag does not match + the value specified. Specify the wildcard character (*) to perform + the operation only if the resource does not exist, and fail the + operation if it does exist. + :param int timeout: + The timeout parameter is expressed in seconds. 
+ """ + _validate_encryption_unsupported(self.require_encryption, self.key_encryption_key) + _validate_not_none('container_name', container_name) + _validate_not_none('blob_name', blob_name) + _validate_not_none('copy_source_url', copy_source_url) + + request = HTTPRequest() + request.method = 'PUT' + request.host_locations = self._get_host_locations() + request.path = _get_path(container_name, blob_name) + request.query = { + 'comp': 'page', + 'timeout': _int_to_str(timeout), + } + request.headers = { + 'x-ms-page-write': 'update', + 'x-ms-copy-source': copy_source_url, + 'x-ms-source-content-md5': source_content_md5, + 'x-ms-source-if-Modified-Since': _datetime_to_utc_string(source_if_modified_since), + 'x-ms-source-if-Unmodified-Since': _datetime_to_utc_string(source_if_unmodified_since), + 'x-ms-source-if-Match': _to_str(source_if_match), + 'x-ms-source-if-None-Match': _to_str(source_if_none_match), + 'x-ms-lease-id': _to_str(lease_id), + 'x-ms-if-sequence-number-le': _to_str(if_sequence_number_lte), + 'x-ms-if-sequence-number-lt': _to_str(if_sequence_number_lt), + 'x-ms-if-sequence-number-eq': _to_str(if_sequence_number_eq), + 'If-Modified-Since': _datetime_to_utc_string(if_modified_since), + 'If-Unmodified-Since': _datetime_to_utc_string(if_unmodified_since), + 'If-Match': _to_str(if_match), + 'If-None-Match': _to_str(if_none_match) + } + _validate_and_format_range_headers( + request, + start_range, + end_range, + align_to_page=True) + _validate_and_format_range_headers( + request, + source_range_start, + source_range_start+(end_range-start_range), + range_header_name="x-ms-source-range") + + return self._perform_request(request, _parse_page_properties) + def clear_page( self, container_name, blob_name, start_range, end_range, lease_id=None, if_sequence_number_lte=None, diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py 
b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py index 6947e7e10ff7..a9538c9d65ba 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/blob/sharedaccesssignature.py @@ -7,9 +7,13 @@ from ..common.sharedaccesssignature import ( SharedAccessSignature, _SharedAccessHelper, + _QueryStringConstants, + _sign_string, ) - from ._constants import X_MS_VERSION +from ..common._serialization import ( + url_quote, +) class BlobSharedAccessSignature(SharedAccessSignature): @@ -20,28 +24,36 @@ class BlobSharedAccessSignature(SharedAccessSignature): generate_*_shared_access_signature method directly. ''' - def __init__(self, account_name, account_key): + def __init__(self, account_name, account_key=None, user_delegation_key=None): ''' :param str account_name: The storage account name used to generate the shared access signatures. :param str account_key: The access key to generate the shares access signatures. + :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key: + Instead of an account key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling get_user_delegation_key on any Blob service object. ''' super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION) + self.user_delegation_key = user_delegation_key - def generate_blob(self, container_name, blob_name, permission=None, + def generate_blob(self, container_name, blob_name, snapshot=None, permission=None, expiry=None, start=None, id=None, ip=None, protocol=None, cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_type=None): ''' - Generates a shared access signature for the blob. 
+ Generates a shared access signature for the blob or one of its snapshots. Use the returned signature with the sas_token parameter of any BlobService. :param str container_name: Name of container. :param str blob_name: Name of blob. + :param str snapshot: + The snapshot parameter is an opaque DateTime value that, + when present, specifies the blob snapshot to grant permission. :param BlobPermissions permission: The permissions associated with the shared access signature. The user is restricted to operations allowed by the permissions. @@ -95,14 +107,16 @@ def generate_blob(self, container_name, blob_name, permission=None, ''' resource_path = container_name + '/' + blob_name - sas = _SharedAccessHelper() + sas = _BlobSharedAccessHelper() sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) sas.add_id(id) - sas.add_resource('b') + sas.add_resource('b' if snapshot is None else 'bs') + sas.add_timestamp(snapshot) sas.add_override_response_headers(cache_control, content_disposition, content_encoding, content_language, content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path) + sas.add_resource_signature(self.account_name, self.account_key, resource_path, + user_delegation_key=self.user_delegation_key) return sas.get_token() @@ -168,13 +182,94 @@ def generate_container(self, container_name, permission=None, expiry=None, Response header value for Content-Type when resource is accessed using this shared access signature. 
''' - sas = _SharedAccessHelper() + sas = _BlobSharedAccessHelper() sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) sas.add_id(id) sas.add_resource('c') sas.add_override_response_headers(cache_control, content_disposition, content_encoding, content_language, content_type) - sas.add_resource_signature(self.account_name, self.account_key, 'blob', container_name) - + sas.add_resource_signature(self.account_name, self.account_key, container_name, + user_delegation_key=self.user_delegation_key) return sas.get_token() + + +class _BlobQueryStringConstants(_QueryStringConstants): + SIGNED_TIMESTAMP = 'snapshot' + SIGNED_OID = 'skoid' + SIGNED_TID = 'sktid' + SIGNED_KEY_START = 'skt' + SIGNED_KEY_EXPIRY = 'ske' + SIGNED_KEY_SERVICE = 'sks' + SIGNED_KEY_VERSION = 'skv' + + +class _BlobSharedAccessHelper(_SharedAccessHelper): + def __init__(self): + super(_BlobSharedAccessHelper, self).__init__() + + def add_timestamp(self, timestamp): + self._add_query(_BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) + + def get_value_to_append(self, query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/blob/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. 
+ string_to_sign = \ + (self.get_value_to_append(_BlobQueryStringConstants.SIGNED_PERMISSION) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_START) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource) + + if user_delegation_key is not None: + self._add_query(_BlobQueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) + self._add_query(_BlobQueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) + self._add_query(_BlobQueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) + self._add_query(_BlobQueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) + self._add_query(_BlobQueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) + self._add_query(_BlobQueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) + + string_to_sign += \ + (self.get_value_to_append(_BlobQueryStringConstants.SIGNED_OID) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_TID) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_START) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_EXPIRY) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_SERVICE) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_KEY_VERSION)) + else: + string_to_sign += self.get_value_to_append(_BlobQueryStringConstants.SIGNED_IDENTIFIER) + + string_to_sign += \ + (self.get_value_to_append(_BlobQueryStringConstants.SIGNED_IP) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_PROTOCOL) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_VERSION) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_RESOURCE) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_TIMESTAMP) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CACHE_CONTROL) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + 
self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_ENCODING) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + self.get_value_to_append(_BlobQueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(_BlobQueryStringConstants.SIGNED_SIGNATURE, + _sign_string(account_key if user_delegation_key is None else user_delegation_key.value, + string_to_sign)) + + def get_token(self): + # a conscious decision was made to exclude the timestamp in the generated token + # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp + exclude = [_BlobQueryStringConstants.SIGNED_TIMESTAMP] + return '&'.join(['{0}={1}'.format(n, url_quote(v)) + for n, v in self.query_dict.items() if v is not None and n not in exclude]) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py index 797c97069ee1..a646e3811588 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/__init__.py @@ -36,3 +36,4 @@ SharedAccessSignature, ) from .tokencredential import TokenCredential +from ._error import AzureSigningError diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py index 15c15b9ea560..13940f97b6f7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_auth.py @@ -10,10 +10,21 @@ DEV_ACCOUNT_NAME, DEV_ACCOUNT_SECONDARY_NAME ) +import sys +if sys.version_info >= (3,): + from 
urllib.parse import parse_qsl +else: + from urlparse import parse_qsl + import logging logger = logging.getLogger(__name__) +from ._error import ( + AzureSigningError, + _wrap_exception, +) + class _StorageSharedKeyAuthentication(object): def __init__(self, account_name, account_key, is_emulated=False): @@ -54,9 +65,14 @@ def _get_canonicalized_headers(self, request): return string_to_sign def _add_authorization_header(self, request, string_to_sign): - signature = _sign_string(self.account_key, string_to_sign) - auth_string = 'SharedKey ' + self.account_name + ':' + signature - request.headers['Authorization'] = auth_string + try: + signature = _sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) class _StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication): @@ -100,18 +116,14 @@ def __init__(self, sas_token): # ignore ?-prefix (added by tools such as Azure Portal) on sas tokens # doing so avoids double question marks when signing if sas_token[0] == '?': - self.sas_token = sas_token[1:] - else: - self.sas_token = sas_token + sas_token = sas_token[1:] + + self.sas_qs = parse_qsl(sas_token) def sign_request(self, request): - # if 'sig=' is present, then the request has already been signed + # if 'sig' is present, then the request has already been signed # as is the case when performing retries - if 'sig=' in request.path: + if 'sig' in request.query: return - if '?' in request.path: - request.path += '&' - else: - request.path += '?' 
- request.path += self.sas_token + request.query.update(self.sas_qs) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py index 1388fddeb625..6836cf91fad6 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_connection.py @@ -60,9 +60,10 @@ def __init__(self, service, account_name=None, account_key=None, sas_token=None, # Only set the account key if a sas_token is not present to allow sas to be used with the emulator self.account_key = DEV_ACCOUNT_KEY if not self.sas_token else None + emulator_endpoint = _EMULATOR_ENDPOINTS[service] if custom_domain is None else custom_domain - self.primary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_NAME) - self.secondary_endpoint = '{}/{}'.format(_EMULATOR_ENDPOINTS[service], DEV_ACCOUNT_SECONDARY_NAME) + self.primary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_NAME) + self.secondary_endpoint = '{}/{}'.format(emulator_endpoint, DEV_ACCOUNT_SECONDARY_NAME) else: # Strip whitespace from the key if self.account_key: @@ -108,7 +109,7 @@ def get_service_parameters(service, account_name=None, account_key=None, sas_tok if connection_string: params = _ServiceParameters._from_connection_string(connection_string, service) elif is_emulated: - params = _ServiceParameters(service, is_emulated=True) + params = _ServiceParameters(service, is_emulated=True, custom_domain=custom_domain) elif account_name: if protocol.lower() != 'https' and token_credential is not None: raise ValueError("Token credential is only supported with HTTPS.") diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_constants.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_constants.py index 
22516d640757..22d93b3a2cd6 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_constants.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_constants.py @@ -7,7 +7,7 @@ import sys __author__ = 'Microsoft Corp. ' -__version__ = '1.3.0' +__version__ = '2.0.0' # UserAgent string sample: 'Azure-Storage/0.37.0-0.38.0 (Python CPython 3.4.2; Windows 8)' # First version(0.37.0) is the common package, and the second version(0.38.0) is the service package @@ -45,3 +45,7 @@ # Encryption constants _ENCRYPTION_PROTOCOL_V1 = '1.0' + +_AUTHORIZATION_HEADER_NAME = 'Authorization' +_COPY_SOURCE_HEADER_NAME = 'x-ms-copy-source' +_REDACTED_VALUE = 'REDACTED' diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_error.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_error.py index 90faa0124ab2..5c8e393197c9 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/_error.py @@ -181,3 +181,38 @@ def _validate_kek_id(kid, resolved_id): def _validate_encryption_unsupported(require_encryption, key_encryption_key): if require_encryption or (key_encryption_key is not None): raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + +def _validate_user_delegation_key(user_delegation_key): + _validate_not_none('user_delegation_key.signed_oid', user_delegation_key.signed_oid) + _validate_not_none('user_delegation_key.signed_tid', user_delegation_key.signed_tid) + _validate_not_none('user_delegation_key.signed_start', user_delegation_key.signed_start) + _validate_not_none('user_delegation_key.signed_expiry', user_delegation_key.signed_expiry) + _validate_not_none('user_delegation_key.signed_version', user_delegation_key.signed_version) + _validate_not_none('user_delegation_key.signed_service', user_delegation_key.signed_service) + 
_validate_not_none('user_delegation_key.value', user_delegation_key.value) + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if len(ex.args) > 0: + msg = ex.args[0] + if version_info >= (3,): + # Automatic chaining in Python 3 means we keep the trace + return desired_type(msg) + else: + # There isn't a good solution in 2 for keeping the stack trace + # in general, or that will not result in an error in 3 + # However, we can keep the previous error type and message + # TODO: In the future we will log the trace + return desired_type('{}: {}'.format(ex.__class__.__name__, msg)) + + +class AzureSigningError(AzureException): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. + """ + pass diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py index f3ac1aa7be70..459146849163 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/cloudstorageaccount.py @@ -19,18 +19,6 @@ SharedAccessSignature, ) -''' -from azure.storage.common._error import _validate_not_none -from azure.storage.common.models import ( - ResourceTypes, - Services, - AccountPermissions, -) -from azure.storage.common.sharedaccesssignature import ( - SharedAccessSignature, -) -''' - class CloudStorageAccount(object): """ @@ -39,7 +27,8 @@ class CloudStorageAccount(object): use the factory or can construct the appropriate service directly. 
""" - def __init__(self, account_name=None, account_key=None, sas_token=None, is_emulated=None): + def __init__(self, account_name=None, account_key=None, sas_token=None, + is_emulated=None, endpoint_suffix=None): ''' :param str account_name: The storage account name. This is used to authenticate requests @@ -52,13 +41,17 @@ def __init__(self, account_name=None, account_key=None, sas_token=None, is_emula instead of the account key. If account key and sas token are both specified, account key will be used to sign. :param bool is_emulated: - Whether to use the emulator. Defaults to False. If specified, will + Whether to use the emulator. Defaults to False. If specified, will override all other parameters. + :param str endpoint_suffix: + The host base component of the url, minus the account name. Defaults + to Azure (core.windows.net). Override this to use a sovereign cloud. ''' self.account_name = account_name self.account_key = account_key self.sas_token = sas_token self.is_emulated = is_emulated + self.endpoint_suffix = endpoint_suffix def create_block_blob_service(self): ''' @@ -72,7 +65,8 @@ def create_block_blob_service(self): from azure.storage.blob.blockblobservice import BlockBlobService return BlockBlobService(self.account_name, self.account_key, sas_token=self.sas_token, - is_emulated=self.is_emulated) + is_emulated=self.is_emulated, + endpoint_suffix=self.endpoint_suffix) except ImportError: raise Exception('The package azure-storage-blob is required. ' + 'Please install it using "pip install azure-storage-blob"') @@ -89,7 +83,8 @@ def create_page_blob_service(self): from azure.storage.blob.pageblobservice import PageBlobService return PageBlobService(self.account_name, self.account_key, sas_token=self.sas_token, - is_emulated=self.is_emulated) + is_emulated=self.is_emulated, + endpoint_suffix=self.endpoint_suffix) except ImportError: raise Exception('The package azure-storage-blob is required. 
' + 'Please install it using "pip install azure-storage-blob"') @@ -106,7 +101,8 @@ def create_append_blob_service(self): from azure.storage.blob.appendblobservice import AppendBlobService return AppendBlobService(self.account_name, self.account_key, sas_token=self.sas_token, - is_emulated=self.is_emulated) + is_emulated=self.is_emulated, + endpoint_suffix=self.endpoint_suffix) except ImportError: raise Exception('The package azure-storage-blob is required. ' + 'Please install it using "pip install azure-storage-blob"') @@ -123,7 +119,8 @@ def create_queue_service(self): from azure.storage.queue.queueservice import QueueService return QueueService(self.account_name, self.account_key, sas_token=self.sas_token, - is_emulated=self.is_emulated) + is_emulated=self.is_emulated, + endpoint_suffix=self.endpoint_suffix) except ImportError: raise Exception('The package azure-storage-queue is required. ' + 'Please install it using "pip install azure-storage-queue"') @@ -139,7 +136,8 @@ def create_file_service(self): try: from azure.storage.file.fileservice import FileService return FileService(self.account_name, self.account_key, - sas_token=self.sas_token) + sas_token=self.sas_token, + endpoint_suffix=self.endpoint_suffix) except ImportError: raise Exception('The package azure-storage-file is required. 
' + 'Please install it using "pip install azure-storage-file"') diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py index 85764430259d..d18c84d80b55 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/retry.py @@ -151,7 +151,7 @@ def _retry(self, context, backoff): self._set_next_host_location(context) # rewind the request body if it is a stream - if hasattr(context.request.body, 'read'): + if hasattr(context.request, 'body') and hasattr(context.request.body, 'read'): # no position was saved, then retry would not work if context.body_position is None: return None diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py index c23201a85bcf..ae55b2a6aaed 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/sharedaccesssignature.py @@ -157,43 +157,6 @@ def add_override_response_headers(self, cache_control, self._add_query(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) self._add_query(_QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) - def add_resource_signature(self, account_name, account_key, service, path): - def get_value_to_append(query): - return_value = self.query_dict.get(query) or '' - return return_value + '\n' - - if path[0] != '/': - path = '/' + path - - canonicalized_resource = '/' + service + '/' + account_name + path + '\n' - - # Form the string to sign from shared_access_policy and canonicalized - # resource. The order of values is important. 
- string_to_sign = \ - (get_value_to_append(_QueryStringConstants.SIGNED_PERMISSION) + - get_value_to_append(_QueryStringConstants.SIGNED_START) + - get_value_to_append(_QueryStringConstants.SIGNED_EXPIRY) + - canonicalized_resource + - get_value_to_append(_QueryStringConstants.SIGNED_IDENTIFIER) + - get_value_to_append(_QueryStringConstants.SIGNED_IP) + - get_value_to_append(_QueryStringConstants.SIGNED_PROTOCOL) + - get_value_to_append(_QueryStringConstants.SIGNED_VERSION)) - - if service == 'blob' or service == 'file': - string_to_sign += \ - (get_value_to_append(_QueryStringConstants.SIGNED_CACHE_CONTROL) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_ENCODING) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + - get_value_to_append(_QueryStringConstants.SIGNED_CONTENT_TYPE)) - - # remove the trailing newline - if string_to_sign[-1] == '\n': - string_to_sign = string_to_sign[:-1] - - self._add_query(_QueryStringConstants.SIGNED_SIGNATURE, - _sign_string(account_key, string_to_sign)) - def add_account_signature(self, account_name, account_key): def get_value_to_append(query): return_value = self.query_dict.get(query) or '' diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py index 859a729df466..41ae2c627805 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/storage/common/storageclient.py @@ -4,14 +4,12 @@ # license information. 
# -------------------------------------------------------------------------- -import sys +import requests from abc import ABCMeta import logging - -logger = logging.getLogger(__name__) from time import sleep +import sys -import requests from azure.common import ( AzureException, AzureHttpError, @@ -23,10 +21,15 @@ DEFAULT_USER_AGENT_STRING, USER_AGENT_STRING_PREFIX, USER_AGENT_STRING_SUFFIX, + _AUTHORIZATION_HEADER_NAME, + _REDACTED_VALUE, + _COPY_SOURCE_HEADER_NAME, ) from ._error import ( _ERROR_DECRYPTION_FAILURE, _http_error_handler, + _wrap_exception, + AzureSigningError, ) from ._http import HTTPError from ._http.httpclient import _HTTPClient @@ -41,6 +44,23 @@ ) from .retry import ExponentialRetry from io import UnsupportedOperation +from .sharedaccesssignature import _QueryStringConstants + +if sys.version_info >= (3,): + from urllib.parse import ( + urlparse, + parse_qsl, + urlunparse, + urlencode, + ) +else: + from urlparse import ( + urlparse, + parse_qsl, + urlunparse, + ) + from urllib import urlencode +logger = logging.getLogger(__name__) class StorageClient(object): @@ -210,6 +230,36 @@ def extract_date_and_request_id(retry_context): else: return "" + @staticmethod + def _scrub_headers(headers): + # make a copy to avoid contaminating the request + clean_headers = headers.copy() + + if _AUTHORIZATION_HEADER_NAME in clean_headers: + clean_headers[_AUTHORIZATION_HEADER_NAME] = _REDACTED_VALUE + + # in case of copy operations, there could be a SAS signature present in the header value + if _COPY_SOURCE_HEADER_NAME in clean_headers \ + and _QueryStringConstants.SIGNED_SIGNATURE + "=" in clean_headers[_COPY_SOURCE_HEADER_NAME]: + # take the url apart and scrub away the signed signature + scheme, netloc, path, params, query, fragment = urlparse(clean_headers[_COPY_SOURCE_HEADER_NAME]) + parsed_qs = dict(parse_qsl(query)) + parsed_qs[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE + + # the SAS needs to be put back together + 
clean_headers[_COPY_SOURCE_HEADER_NAME] = urlunparse( + (scheme, netloc, path, params, urlencode(parsed_qs), fragment)) + return clean_headers + + @staticmethod + def _scrub_query_parameters(query): + # make a copy to avoid contaminating the request + clean_queries = query.copy() + + if _QueryStringConstants.SIGNED_SIGNATURE in clean_queries: + clean_queries[_QueryStringConstants.SIGNED_SIGNATURE] = _REDACTED_VALUE + return clean_queries + def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None): ''' Sends the request and return response. Catches HTTPError and hands it @@ -258,12 +308,14 @@ def _perform_request(self, request, parser=None, parser_args=None, operation_con retry_context.request = request # Log the request before it goes out - logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.", - client_request_id_prefix, - request.method, - request.path, - request.query, - str(request.headers).replace('\n', '')) + # Avoid unnecessary scrubbing if the logger is not on + if logger.isEnabledFor(logging.INFO): + logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.", + client_request_id_prefix, + request.method, + request.path, + self._scrub_query_parameters(request.query), + str(self._scrub_headers(request.headers)).replace('\n', '')) # Perform the request response = self._httpclient.perform_request(request) @@ -306,21 +358,13 @@ def _perform_request(self, request, parser=None, parser_args=None, operation_con raise ex except Exception as ex: retry_context.exception = ex - if sys.version_info >= (3,): - # Automatic chaining in Python 3 means we keep the trace - raise AzureException(ex.args[0]) - else: - # There isn't a good solution in 2 for keeping the stack trace - # in general, or that will not result in an error in 3 - # However, we can keep the previous error type and message - # TODO: In the future we will log the trace - msg = "" - if len(ex.args) > 0: - msg = 
ex.args[0] - raise AzureException('{}: {}'.format(ex.__class__.__name__, msg)) + raise _wrap_exception(ex, AzureException) except AzureException as ex: # only parse the strings used for logging if logging is at least enabled for CRITICAL + exception_str_in_one_line = '' + status_code = '' + timestamp_and_request_id = '' if logger.isEnabledFor(logging.CRITICAL): exception_str_in_one_line = str(ex).replace('\n', '') status_code = retry_context.response.status if retry_context.response is not None else 'Unknown' @@ -335,6 +379,11 @@ def _perform_request(self, request, parser=None, parser_args=None, operation_con status_code, exception_str_in_one_line) raise ex + elif isinstance(ex, AzureSigningError): + logger.info("%s Unable to sign the request: Exception=%s.", + client_request_id_prefix, + exception_str_in_one_line) + raise ex logger.info("%s Operation failed: checking if the operation should be retried. " "Current retry count=%s, %s, HTTP status code=%s, Exception=%s.", diff --git a/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/vendor_azure_storage_version.md b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/vendor_azure_storage_version.md new file mode 100644 index 000000000000..caabbbca7d46 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/azure/eventprocessorhost/vendor/vendor_azure_storage_version.md @@ -0,0 +1,2 @@ +# azure-storage-blob 2.0.1 +# azure-storage-common 2.0.0 From f6dcf2dcd6500abfeadfc87313b15cfd3f80a9a0 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sat, 22 Jun 2019 23:36:16 -0700 Subject: [PATCH 39/54] catch exception for mgmt_request --- .../azure/eventhub/aio/client_async.py | 108 +++++++++--------- .../azure-eventhubs/azure/eventhub/client.py | 108 +++++++++--------- 2 files changed, 112 insertions(+), 104 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index dec943455ee2..efc96d30526f 100644 --- 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -14,8 +14,10 @@ from uamqp import ( Message, AMQPClientAsync, + errors, ) +from azure.eventhub.error import ConnectError from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential from ..client_abstract import EventHubClientAbstract @@ -84,6 +86,36 @@ def _create_auth(self, username=None, password=None): get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) + async def _management_request(self, mgmt_msg, op_type): + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password": self._auth_config.get("iot_password")} + connect_count = 0 + while True: + connect_count += 1 + mgmt_auth = self._create_auth(**alt_creds) + mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.config.network_tracing) + try: + await mgmt_client.open_async() + response = await mgmt_client.mgmt_request_async( + mgmt_msg, + constants.READ_OPERATION, + op_type=op_type, + status_code_field=b'status-code', + description_fields=b'status-description') + return response + except (errors.AMQPConnectionError, errors.TokenAuthFailure) as failure: + if connect_count >= self.config.max_retries: + err = ConnectError( + "Can not connect to EventHubs or get management info from the service. " + "Please make sure the connection string or token is correct and retry. 
" + "Besides, this method doesn't work if you use an IoT connection string.", + failure + ) + raise err + finally: + await mgmt_client.close_async() + async def get_properties(self): # type:() -> Dict[str, Any] """ @@ -96,29 +128,15 @@ async def get_properties(self): :rtype: dict """ - alt_creds = { - "username": self._auth_config.get("iot_username"), - "password": self._auth_config.get("iot_password")} - try: - mgmt_auth = self._create_auth(**alt_creds) - mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug) - await mgmt_client.open_async() - mgmt_msg = Message(application_properties={'name': self.eh_name}) - response = await mgmt_client.mgmt_request_async( - mgmt_msg, - constants.READ_OPERATION, - op_type=b'com.microsoft:eventhub', - status_code_field=b'status-code', - description_fields=b'status-description') - eh_info = response.get_data() - output = {} - if eh_info: - output['path'] = eh_info[b'name'].decode('utf-8') - output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at']) / 1000) - output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] - return output - finally: - await mgmt_client.close_async() + mgmt_msg = Message(application_properties={'name': self.eh_name}) + response = await self._management_request(mgmt_msg, op_type=b'com.microsoft:eventhub') + output = {} + eh_info = response.get_data() + if eh_info: + output['path'] = eh_info[b'name'].decode('utf-8') + output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at']) / 1000) + output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] + return output async def get_partition_ids(self): # type:() -> List[str] @@ -147,35 +165,21 @@ async def get_partition_properties(self, partition): :type partition: str :rtype: dict """ - alt_creds = { - "username": self._auth_config.get("iot_username"), - "password": self._auth_config.get("iot_password")} - try: - mgmt_auth = 
self._create_auth(**alt_creds) - mgmt_client = AMQPClientAsync(self.mgmt_target, auth=mgmt_auth, debug=self.debug) - await mgmt_client.open_async() - mgmt_msg = Message(application_properties={'name': self.eh_name, - 'partition': partition}) - response = await mgmt_client.mgmt_request_async( - mgmt_msg, - constants.READ_OPERATION, - op_type=b'com.microsoft:partition', - status_code_field=b'status-code', - description_fields=b'status-description') - partition_info = response.get_data() - output = {} - if partition_info: - output['event_hub_path'] = partition_info[b'name'].decode('utf-8') - output['id'] = partition_info[b'partition'].decode('utf-8') - output['beginning_sequence_number'] = partition_info[b'begin_sequence_number'] - output['last_enqueued_sequence_number'] = partition_info[b'last_enqueued_sequence_number'] - output['last_enqueued_offset'] = partition_info[b'last_enqueued_offset'].decode('utf-8') - output['last_enqueued_time_utc'] = datetime.datetime.utcfromtimestamp( - float(partition_info[b'last_enqueued_time_utc'] / 1000)) - output['is_empty'] = partition_info[b'is_partition_empty'] - return output - finally: - await mgmt_client.close_async() + mgmt_msg = Message(application_properties={'name': self.eh_name, + 'partition': partition}) + response = await self._management_request(mgmt_msg, op_type=b'com.microsoft:partition') + partition_info = response.get_data() + output = {} + if partition_info: + output['event_hub_path'] = partition_info[b'name'].decode('utf-8') + output['id'] = partition_info[b'partition'].decode('utf-8') + output['beginning_sequence_number'] = partition_info[b'begin_sequence_number'] + output['last_enqueued_sequence_number'] = partition_info[b'last_enqueued_sequence_number'] + output['last_enqueued_offset'] = partition_info[b'last_enqueued_offset'].decode('utf-8') + output['last_enqueued_time_utc'] = datetime.datetime.utcfromtimestamp( + float(partition_info[b'last_enqueued_time_utc'] / 1000)) + output['is_empty'] = 
partition_info[b'is_partition_empty'] + return output def create_consumer( self, consumer_group, partition_id, event_position, owner_level=None, diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index f07723cf970e..976ee5723cd5 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -18,10 +18,12 @@ from uamqp import Message, AMQPClient from uamqp import authentication from uamqp import constants +from uamqp import errors from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer from azure.eventhub.common import parse_sas_token, EventPosition +from azure.eventhub.error import ConnectError from .client_abstract import EventHubClientAbstract from .common import EventHubSASTokenCredential, EventHubSharedKeyCredential @@ -89,6 +91,36 @@ def _create_auth(self, username=None, password=None): get_jwt_token, http_proxy=http_proxy, transport_type=transport_type) + def _management_request(self, mgmt_msg, op_type): + alt_creds = { + "username": self._auth_config.get("iot_username"), + "password": self._auth_config.get("iot_password")} + connect_count = 0 + while True: + connect_count += 1 + mgmt_auth = self._create_auth(**alt_creds) + mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.config.network_tracing) + try: + mgmt_client.open() + response = mgmt_client.mgmt_request( + mgmt_msg, + constants.READ_OPERATION, + op_type=op_type, + status_code_field=b'status-code', + description_fields=b'status-description') + return response + except (errors.AMQPConnectionError, errors.TokenAuthFailure) as failure: + if connect_count >= self.config.max_retries: + err = ConnectError( + "Can not connect to EventHubs or get management info from the service. " + "Please make sure the connection string or token is correct and retry. 
" + "Besides, this method doesn't work if you use an IoT connection string.", + failure + ) + raise err + finally: + mgmt_client.close() + def get_properties(self): # type:() -> Dict[str, Any] """ @@ -101,29 +133,15 @@ def get_properties(self): :rtype: dict """ - alt_creds = { - "username": self._auth_config.get("iot_username"), - "password": self._auth_config.get("iot_password")} - try: - mgmt_auth = self._create_auth(**alt_creds) - mgmt_client = uamqp.AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.config.network_tracing) - mgmt_client.open() - mgmt_msg = Message(application_properties={'name': self.eh_name}) - response = mgmt_client.mgmt_request( - mgmt_msg, - constants.READ_OPERATION, - op_type=b'com.microsoft:eventhub', - status_code_field=b'status-code', - description_fields=b'status-description') - eh_info = response.get_data() - output = {} - if eh_info: - output['path'] = eh_info[b'name'].decode('utf-8') - output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at'])/1000) - output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] - return output - finally: - mgmt_client.close() + mgmt_msg = Message(application_properties={'name': self.eh_name}) + response = self._management_request(mgmt_msg, op_type=b'com.microsoft:eventhub') + output = {} + eh_info = response.get_data() + if eh_info: + output['path'] = eh_info[b'name'].decode('utf-8') + output['created_at'] = datetime.datetime.utcfromtimestamp(float(eh_info[b'created_at']) / 1000) + output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] + return output def get_partition_ids(self): # type:() -> List[str] @@ -152,35 +170,21 @@ def get_partition_properties(self, partition): :type partition: str :rtype: dict """ - alt_creds = { - "username": self._auth_config.get("iot_username"), - "password": self._auth_config.get("iot_password")} - try: - mgmt_auth = self._create_auth(**alt_creds) - mgmt_client = 
AMQPClient(self.mgmt_target, auth=mgmt_auth, debug=self.debug) - mgmt_client.open() - mgmt_msg = Message(application_properties={'name': self.eh_name, - 'partition': partition}) - response = mgmt_client.mgmt_request( - mgmt_msg, - constants.READ_OPERATION, - op_type=b'com.microsoft:partition', - status_code_field=b'status-code', - description_fields=b'status-description') - partition_info = response.get_data() - output = {} - if partition_info: - output['event_hub_path'] = partition_info[b'name'].decode('utf-8') - output['id'] = partition_info[b'partition'].decode('utf-8') - output['beginning_sequence_number'] = partition_info[b'begin_sequence_number'] - output['last_enqueued_sequence_number'] = partition_info[b'last_enqueued_sequence_number'] - output['last_enqueued_offset'] = partition_info[b'last_enqueued_offset'].decode('utf-8') - output['last_enqueued_time_utc'] = datetime.datetime.utcfromtimestamp( - float(partition_info[b'last_enqueued_time_utc'] / 1000)) - output['is_empty'] = partition_info[b'is_partition_empty'] - return output - finally: - mgmt_client.close() + mgmt_msg = Message(application_properties={'name': self.eh_name, + 'partition': partition}) + response = self._management_request(mgmt_msg, op_type=b'com.microsoft:partition') + partition_info = response.get_data() + output = {} + if partition_info: + output['event_hub_path'] = partition_info[b'name'].decode('utf-8') + output['id'] = partition_info[b'partition'].decode('utf-8') + output['beginning_sequence_number'] = partition_info[b'begin_sequence_number'] + output['last_enqueued_sequence_number'] = partition_info[b'last_enqueued_sequence_number'] + output['last_enqueued_offset'] = partition_info[b'last_enqueued_offset'].decode('utf-8') + output['last_enqueued_time_utc'] = datetime.datetime.utcfromtimestamp( + float(partition_info[b'last_enqueued_time_utc'] / 1000)) + output['is_empty'] = partition_info[b'is_partition_empty'] + return output def create_consumer( self, consumer_group, 
partition_id, event_position, From c6ae04a2fc0c64263e323d4ddf5ee118b610a0c5 Mon Sep 17 00:00:00 2001 From: Yunhao Ling <47871814+yunhaoling@users.noreply.github.com> Date: Sun, 23 Jun 2019 16:47:14 -0700 Subject: [PATCH 40/54] Update comment and code structure (#6042) --- .../azure/eventhub/aio/client_async.py | 20 +- .../azure/eventhub/aio/consumer_async.py | 95 ++++----- .../azure/eventhub/aio/producer_async.py | 106 +++++----- .../azure-eventhubs/azure/eventhub/client.py | 15 +- .../azure/eventhub/client_abstract.py | 181 +++++++----------- .../azure-eventhubs/azure/eventhub/common.py | 38 ++-- .../azure/eventhub/configuration.py | 1 - .../azure/eventhub/consumer.py | 100 +++++----- .../azure-eventhubs/azure/eventhub/error.py | 5 +- .../azure/eventhub/producer.py | 98 +++++----- 10 files changed, 311 insertions(+), 348 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index efc96d30526f..cdcba824ad95 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -2,15 +2,13 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- - import logging import datetime import functools import asyncio -from typing import Any, List, Dict, Union - +from typing import Any, List, Dict -from uamqp import authentication, constants, types, errors +from uamqp import authentication, constants from uamqp import ( Message, AMQPClientAsync, @@ -188,11 +186,12 @@ def create_consumer( """ Create an async consumer to the client for a particular consumer group and partition. - :param consumer_group: The name of the consumer group. Default value is `$Default`. 
+ :param consumer_group: The name of the consumer group this consumer is associated with. + Events are read in the context of this group. :type consumer_group: str - :param partition_id: The ID of the partition. + :param partition_id: The identifier of the Event Hub partition from which events will be received. :type partition_id: str - :param event_position: The position from which to start receiving. + :param event_position: The position within the partition where the consumer should begin reading events. :type event_position: ~azure.eventhub.common.EventPosition :param owner_level: The priority of the exclusive consumer. The client will create an exclusive consumer if owner_level is set. @@ -228,8 +227,7 @@ def create_producer( self, partition_id=None, operation=None, send_timeout=None, loop=None): # type: (str, str, float, asyncio.AbstractEventLoop) -> EventHubProducer """ - Create an async producer to the client to send ~azure.eventhub.common.EventData object - to an EventHub. + Create an async producer to send EventData object to an EventHub. :param partition_id: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via @@ -244,15 +242,13 @@ def create_producer( :param loop: An event loop. If not specified the default event loop will be used. :rtype ~azure.eventhub.aio.sender_async.EventHubProducer - Example: .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py :start-after: [START create_eventhub_client_async_sender] :end-before: [END create_eventhub_client_async_sender] :language: python :dedent: 4 - :caption: Add an async producer to the client to - send ~azure.eventhub.common.EventData object to an EventHub. + :caption: Add an async producer to the client to send EventData. 
""" target = "amqps://{}{}".format(self.address.hostname, self.address.path) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 3cf285106c62..3ff10b64ed04 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- - import asyncio import uuid import logging @@ -19,7 +18,16 @@ class EventHubConsumer(object): """ - Implements the async API of a EventHubConsumer. + A consumer responsible for reading EventData from a specific Event Hub + partition and as a member of a specific consumer group. + + A consumer may be exclusive, which asserts ownership over the partition for the consumer + group to ensure that only one consumer from that group is reading the from the partition. + These exclusive consumers are sometimes referred to as "Epoch Consumers." + + A consumer may also be non-exclusive, allowing multiple consumers from the same consumer + group to be actively reading events from the partition. These non-exclusive consumers are + sometimes referred to as "Non-Epoch Consumers." """ timeout = 0 @@ -29,7 +37,8 @@ def __init__( # pylint: disable=super-init-not-called self, client, source, event_position=None, prefetch=300, owner_level=None, keep_alive=None, auto_reconnect=True, loop=None): """ - Instantiate an async consumer. + Instantiate an async consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method + in EventHubClient. :param client: The parent EventHubClientAsync. 
:type client: ~azure.eventhub.aio.EventHubClientAsync @@ -158,6 +167,7 @@ def _check_closed(self): if self.error: raise EventHubError("This consumer has been closed. Please create a new consumer to receive event data.", self.error) + async def _open(self): """ Open the EventHubConsumer using the supplied connection. @@ -282,41 +292,6 @@ async def _reconnect(self): a retryable error - attempt to reconnect.""" return await self._build_connection(is_reconnect=True) - async def close(self, exception=None): - # type: (Exception) -> None - """ - Close down the handler. If the handler has already closed, - this will be a no op. An optional exception can be passed in to - indicate that the handler was shutdown due to error. - - :param exception: An optional exception if the handler is closing - due to an error. - :type exception: Exception - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_receiver_close] - :end-before: [END eventhub_client_async_receiver_close] - :language: python - :dedent: 4 - :caption: Close down the handler. - - """ - self.running = False - if self.error: - return - if isinstance(exception, errors.LinkRedirect): - self.redirected = exception - elif isinstance(exception, EventHubError): - self.error = exception - elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = ConnectError(str(exception), exception) - elif exception: - self.error = EventHubError(str(exception)) - else: - self.error = EventHubError("This receive handler is now closed.") - await self._handler.close_async() - @property def queue_size(self): # type: () -> int @@ -341,10 +316,8 @@ async def receive(self, max_batch_size=None, timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. 
:type max_batch_size: int - :param timeout: The timeout time in seconds to receive a batch of events - from an Event Hub. Results will be returned after timeout. If combined - with max_batch_size, it will return after either the count of received events - reaches the max_batch_size or the operation has timed out. + :param timeout: The maximum wait time to build up the requested message count for the batch. + If not specified, the default wait time specified when the consumer was created will be used. :type timeout: float :rtype: list[~azure.eventhub.common.EventData] @@ -357,6 +330,7 @@ async def receive(self, max_batch_size=None, timeout=None): :caption: Receives events asynchronously """ + self._check_closed() await self._open() max_batch_size = min(self.client.config.max_batch_size, self.prefetch) if max_batch_size is None else max_batch_size @@ -425,4 +399,39 @@ async def receive(self, max_batch_size=None, timeout=None): log.info("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) await self.close(exception=error) - raise error \ No newline at end of file + raise error + + async def close(self, exception=None): + # type: (Exception) -> None + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_receiver_close] + :end-before: [END eventhub_client_async_receiver_close] + :language: python + :dedent: 4 + :caption: Close down the handler. 
+ + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): + self.error = ConnectError(str(exception), exception) + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This receive handler is now closed.") + await self._handler.close_async() \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 5398186d0e39..169ad3a3e702 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- - import uuid import asyncio import logging @@ -19,7 +18,10 @@ class EventHubProducer(object): """ - Implements the async API of a EventHubProducer. + A producer responsible for transmitting EventData to a specific Event Hub, + grouped together in batches. Depending on the options specified at creation, the producer may + be created to allow event data to be automatically routed to an available partition or specific + to a partition. """ @@ -27,7 +29,8 @@ def __init__( # pylint: disable=super-init-not-called self, client, target, partition=None, send_timeout=60, keep_alive=None, auto_reconnect=True, loop=None): """ - Instantiate an async EventHubProducer. + Instantiate an async EventHubProducer. EventHubProducer should be instantiated by calling the `create_producer` + method in EventHubClient. :param client: The parent EventHubClientAsync. 
:type client: ~azure.eventhub.aio.EventHubClientAsync @@ -131,7 +134,8 @@ async def _build_connection(self, is_reconnect=False): error_policy=self.retry_policy, keep_alive_interval=self.keep_alive, client_name=self.name, - properties=self.client._create_properties(self.client.config.user_agent)) + properties=self.client._create_properties(self.client.config.user_agent), + loop=self.loop) try: await self._handler.open_async() while not await self._handler.client_ready_async(): @@ -191,41 +195,6 @@ async def _build_connection(self, is_reconnect=False): async def _reconnect(self): return await self._build_connection(is_reconnect=True) - async def close(self, exception=None): - # type: (Exception) -> None - """ - Close down the handler. If the handler has already closed, - this will be a no op. An optional exception can be passed in to - indicate that the handler was shutdown due to error. - - :param exception: An optional exception if the handler is closing - due to an error. - :type exception: Exception - - Example: - .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py - :start-after: [START eventhub_client_async_sender_close] - :end-before: [END eventhub_client_async_sender_close] - :language: python - :dedent: 4 - :caption: Close down the handler. 
- - """ - self.running = False - if self.error: - return - if isinstance(exception, errors.LinkRedirect): - self.redirected = exception - elif isinstance(exception, EventHubError): - self.error = exception - elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): - self.error = ConnectError(str(exception), exception) - elif exception: - self.error = EventHubError(str(exception)) - else: - self.error = EventHubError("This send handler is now closed.") - await self._handler.close_async() - async def _send_event_data(self): await self._open() max_retries = self.client.config.max_retries @@ -307,6 +276,23 @@ def _check_closed(self): raise EventHubError("This producer has been closed. Please create a new producer to send event data.", self.error) + def _on_outcome(self, outcome, condition): + """ + Called when the outcome is received for a delivery. + + :param outcome: The outcome of the message delivery - success or failure. + :type outcome: ~uamqp.constants.MessageSendResult + :param condition: Detail information of the outcome. + + """ + self._outcome = outcome + self._condition = condition + + @staticmethod + def _error(outcome, condition): + if outcome != constants.MessageSendResult.Ok: + raise condition + @staticmethod def _set_partition_key(event_datas, partition_key): ed_iter = iter(event_datas) @@ -352,19 +338,37 @@ async def send(self, event_data, partition_key=None): self.unsent_events = [wrapper_event_data.message] await self._send_event_data() - def _on_outcome(self, outcome, condition): + async def close(self, exception=None): + # type: (Exception) -> None """ - Called when the outcome is received for a delivery. + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. - :param outcome: The outcome of the message delivery - success or failure. 
- :type outcome: ~uamqp.constants.MessageSendResult - :param condition: Detail information of the outcome. + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception - """ - self._outcome = outcome - self._condition = condition + Example: + .. literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py + :start-after: [START eventhub_client_async_sender_close] + :end-before: [END eventhub_client_async_sender_close] + :language: python + :dedent: 4 + :caption: Close down the handler. - @staticmethod - def _error(outcome, condition): - if outcome != constants.MessageSendResult.Ok: - raise condition + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif isinstance(exception, (errors.LinkDetach, errors.ConnectionClose)): + self.error = ConnectError(str(exception), exception) + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This send handler is now closed.") + await self._handler.close_async() \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index 976ee5723cd5..c2dc694bbe9b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -12,10 +12,10 @@ from urllib import unquote_plus, urlencode, quote_plus except ImportError: from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus -from typing import Any, List, Dict, Union +from typing import Any, List, Dict import uamqp -from uamqp import Message, AMQPClient +from uamqp import Message from uamqp import authentication from uamqp import constants from uamqp import errors @@ -194,11 +194,12 @@ def create_consumer( """ Create a consumer to the client for a particular 
consumer group and partition. - :param consumer_group: The name of the consumer group. Default value is `$Default`. + :param consumer_group: The name of the consumer group this consumer is associated with. + Events are read in the context of this group. :type consumer_group: str - :param partition_id: The ID of the partition. + :param partition_id: The identifier of the Event Hub partition from which events will be received. :type partition_id: str - :param event_position: The position from which to start receiving. + :param event_position: The position within the partition where the consumer should begin reading events. :type event_position: ~azure.eventhub.common.EventPosition :param owner_level: The priority of the exclusive consumer. The client will create an exclusive consumer if owner_level is set. @@ -232,7 +233,7 @@ def create_consumer( def create_producer(self, partition_id=None, operation=None, send_timeout=None): # type: (str, str, float) -> EventHubProducer """ - Create a EventHubProducer to send EventData object to an EventHub. + Create a producer to send EventData object to an EventHub. :param partition_id: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via @@ -252,7 +253,7 @@ def create_producer(self, partition_id=None, operation=None, send_timeout=None): :end-before: [END create_eventhub_client_sender] :language: python :dedent: 4 - :caption: Add a producer to the client to send EventData object to an EventHub. + :caption: Add a producer to the client to send EventData. 
""" target = "amqps://{}{}".format(self.address.hostname, self.address.path) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index afb58280442f..dc041269c5e5 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -22,8 +22,7 @@ TYPE_CHECKING = False if TYPE_CHECKING: from azure.core.credentials import TokenCredential - from typing import Union, List, Dict - + from typing import Union from azure.eventhub import __version__ from azure.eventhub.configuration import Configuration @@ -32,6 +31,7 @@ log = logging.getLogger(__name__) MAX_USER_AGENT_LENGTH = 512 + def _parse_conn_str(conn_str): endpoint = None shared_access_key_name = None @@ -100,7 +100,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): :param host: The hostname of the the Event Hub. :type host: str - :param event_hub_path: The path/name of the Event Hub + :param event_hub_path: The path of the specific Event Hub to connect the client to. :type event_hub_path: str :param network_tracing: Whether to output network trace logs to the logger. Default is `False`. @@ -121,16 +121,15 @@ def __init__(self, host, event_hub_path, credential, **kwargs): :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default value is 3. :type max_retries: int - :param transport_type: The transport protocol type - default is ~uamqp.TransportType.Amqp. - ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the - transport type is explicitly requested. + :param transport_type: The type of transport protocol that will be used for communicating with + the Event Hubs service. Default is ~azure.eventhub.TransportType.Amqp. :type transport_type: ~azure.eventhub.TransportType :param prefetch: The message prefetch count of the consumer. Default is 300. 
:type prefetch: int :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but will return as soon as service returns no new events. Default value is the same as prefetch. :type max_batch_size: int - :param receive_timeout: The timeout time in seconds to receive a batch of events from an Event Hub. + :param receive_timeout: The timeout in seconds to receive a batch of events from an Event Hub. Default value is 0 seconds. :type receive_timeout: float :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is @@ -161,115 +160,8 @@ def __init__(self, host, event_hub_path, credential, **kwargs): log.info("%r: Created the Event Hub client", self.container_id) - @classmethod - def from_connection_string(cls, conn_str, event_hub_path=None, **kwargs): - """Create an EventHubClient from a connection string. - - :param conn_str: The connection string. - :type conn_str: str - :param event_hub_path: The path/name of the Event Hub, if the EntityName is - not included in the connection string. - :type event_hub_path: str - :param network_tracing: Whether to output network trace logs to the logger. Default - is `False`. - :type network_tracing: bool - :param http_proxy: HTTP proxy settings. This must be a dictionary with the following - keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). - Additionally the following keys may also be present: 'username', 'password'. - :type http_proxy: dict[str, Any] - :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. - The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: float - :param user_agent: The user agent that needs to be appended to the built in user agent string. - :type user_agent: str - :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default - value is 3. 
- :type max_retries: int - :param transport_type: The transport protocol type - default is ~uamqp.TransportType.Amqp. - ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the - transport type is explicitly requested. - :type transport_type: ~azure.eventhub.TransportType - :param prefetch: The message prefetch count of the consumer. Default is 300. - :type prefetch: int - :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but - will return as soon as service returns no new events. Default value is the same as prefetch. - :type max_batch_size: int - :param receive_timeout: The timeout time in seconds to receive a batch of events from an Event Hub. - Default value is 0 seconds. - :type receive_timeout: float - :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is - queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: float - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_connstr] - :end-before: [END create_eventhub_client_connstr] - :language: python - :dedent: 4 - :caption: Create an EventHubClient from a connection string. - - """ - is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") - if not is_iot_conn_str: - address, policy, key, entity = _parse_conn_str(conn_str) - entity = event_hub_path or entity - left_slash_pos = address.find("//") - if left_slash_pos != -1: - host = address[left_slash_pos + 2:] - else: - host = address - return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) - else: - return cls._from_iothub_connection_string(conn_str, **kwargs) - @classmethod def _from_iothub_connection_string(cls, conn_str, **kwargs): - """ - Create an EventHubClient from an IoTHub connection string. - - :param conn_str: The connection string. 
- :type conn_str: str - :param network_tracing: Whether to output network trace logs to the logger. Default - is `False`. - :type network_tracing: bool - :param http_proxy: HTTP proxy settings. This must be a dictionary with the following - keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). - Additionally the following keys may also be present: 'username', 'password'. - :type http_proxy: dict[str, Any] - :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. - The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. - :type auth_timeout: float - :param user_agent: The user agent that needs to be appended to the built in user agent string. - :type user_agent: str - :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default - value is 3. - :type max_retries: int - :param transport_type: The transport protocol type - default is ~uamqp.TransportType.Amqp. - ~uamqp.TransportType.AmqpOverWebsocket is applied when http_proxy is set or the - transport type is explicitly requested. - :type transport_type: ~azure.eventhub.TransportType - :param prefetch: The message prefetch count of the consumer. Default is 300. - :type prefetch: int - :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but - will return as soon as service returns no new events. Default value is the same as prefetch. - :type max_batch_size: int - :param receive_timeout: The timeout time in seconds to receive a batch of events from an Event Hub. - Default value is 0 seconds. - :type receive_timeout: float - :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is - queued. Default value is 60 seconds. If set to 0, there will be no timeout. - :type send_timeout: float - - Example: - .. 
literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START create_eventhub_client_iot_connstr] - :end-before: [END create_eventhub_client_iot_connstr] - :language: python - :dedent: 4 - :caption: Create an EventHubClient from an IoTHub connection string. - - """ address, policy, key, _ = _parse_conn_str(conn_str) hub_name = address.split('.')[0] username = "{}@sas.root.{}".format(policy, hub_name) @@ -326,6 +218,67 @@ def _process_redirect_uri(self, redirect): self.eh_name = self.address.path.lstrip('/') self.mgmt_target = redirect_uri + @classmethod + def from_connection_string(cls, conn_str, event_hub_path=None, **kwargs): + """Create an EventHubClient from an EventHub/IotHub connection string. + + :param conn_str: The connection string. + :type conn_str: str + :param event_hub_path: The path of the specific Event Hub to connect the client to, if the EntityName is + not included in the connection string. + :type event_hub_path: str + :param network_tracing: Whether to output network trace logs to the logger. Default + is `False`. + :type network_tracing: bool + :param http_proxy: HTTP proxy settings. This must be a dictionary with the following + keys: 'proxy_hostname' (str value) and 'proxy_port' (int value). + Additionally the following keys may also be present: 'username', 'password'. + :type http_proxy: dict[str, Any] + :param auth_timeout: The time in seconds to wait for a token to be authorized by the service. + The default value is 60 seconds. If set to 0, no timeout will be enforced from the client. + :type auth_timeout: float + :param user_agent: The user agent that needs to be appended to the built in user agent string. + :type user_agent: str + :param max_retries: The max number of attempts to redo the failed operation when an error happened. Default + value is 3. + :type max_retries: int + :param transport_type: The type of transport protocol that will be used for communicating with + the Event Hubs service. 
Default is ~azure.eventhub.TransportType.Amqp. + :type transport_type: ~azure.eventhub.TransportType + :param prefetch: The message prefetch count of the consumer. Default is 300. + :type prefetch: int + :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but + will return as soon as service returns no new events. Default value is the same as prefetch. + :type max_batch_size: int + :param receive_timeout: The timeout in seconds to receive a batch of events from an Event Hub. + Default value is 0 seconds. + :type receive_timeout: float + :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is + queued. Default value is 60 seconds. If set to 0, there will be no timeout. + :type send_timeout: float + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START create_eventhub_client_connstr] + :end-before: [END create_eventhub_client_connstr] + :language: python + :dedent: 4 + :caption: Create an EventHubClient from a connection string. 
+ + """ + is_iot_conn_str = conn_str.lstrip().lower().startswith("hostname") + if not is_iot_conn_str: + address, policy, key, entity = _parse_conn_str(conn_str) + entity = event_hub_path or entity + left_slash_pos = address.find("//") + if left_slash_pos != -1: + host = address[left_slash_pos + 2:] + else: + host = address + return cls(host, entity, EventHubSharedKeyCredential(policy, key), **kwargs) + else: + return cls._from_iothub_connection_string(conn_str, **kwargs) + @abstractmethod def create_consumer( self, consumer_group, partition_id, event_position, owner_level=None, diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py index da01ec8603d1..5a6702a60324 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/common.py @@ -4,16 +4,12 @@ # -------------------------------------------------------------------------------------------- from __future__ import unicode_literals -from enum import Enum import datetime import calendar import json - import six -import uamqp -from uamqp import BatchMessage -from uamqp import types, constants, errors +from uamqp import BatchMessage, Message, types from uamqp.message import MessageHeader, MessageProperties @@ -33,9 +29,6 @@ def parse_sas_token(sas_token): return sas_data -Message = uamqp.Message - - class EventData(object): """ The EventData class is a holder of event content. @@ -109,7 +102,20 @@ def __str__(self): dic['partition_key'] = str(self.partition_key) return str(dic) + def _set_partition_key(self, value): + """ + Set the partition key of the event data object. + :param value: The partition key to set. 
+ :type value: str or bytes + """ + annotations = dict(self._annotations) + annotations[self._partition_key] = value + header = MessageHeader() + header.durable = True + self.message.annotations = annotations + self.message.header = header + self._annotations = annotations @property def sequence_number(self): @@ -166,22 +172,6 @@ def partition_key(self): except KeyError: return self._annotations.get(EventData.PROP_PARTITION_KEY, None) - def _set_partition_key(self, value): - """ - Set the partition key of the event data object. - - :param value: The partition key to set. - :type value: str or bytes - """ - annotations = dict(self._annotations) - annotations[self._partition_key] = value - header = MessageHeader() - header.durable = True - self.message.annotations = annotations - self.message.header = header - self._annotations = annotations - - @property def application_properties(self): """ diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index ba8aee6249b0..b3747a0ef581 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- - from uamqp.constants import TransportType diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 661ce983efb9..bcc3c7e0b0c3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -22,7 +22,16 @@ class EventHubConsumer(object): """ - Implements a EventHubConsumer. 
+ A consumer responsible for reading EventData from a specific Event Hub + partition and as a member of a specific consumer group. + + A consumer may be exclusive, which asserts ownership over the partition for the consumer + group to ensure that only one consumer from that group is reading from the partition. + These exclusive consumers are sometimes referred to as "Epoch Consumers." + + A consumer may also be non-exclusive, allowing multiple consumers from the same consumer + group to be actively reading events from the partition. These non-exclusive consumers are + sometimes referred to as "Non-Epoch Consumers." """ timeout = 0 @@ -31,7 +40,8 @@ class EventHubConsumer(object): def __init__(self, client, source, event_position=None, prefetch=300, owner_level=None, keep_alive=None, auto_reconnect=True): """ - Instantiate a consumer. + Instantiate a consumer. EventHubConsumer should be instantiated by calling the `create_consumer` method + in EventHubClient. :param client: The parent EventHubClient. :type client: ~azure.eventhub.client.EventHubClient @@ -147,11 +157,10 @@ def __next__(self): raise except KeyboardInterrupt: log.info("EventHubConsumer stops due to keyboard interrupt") - print("EventHubConsumer stopped") self.close() raise except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) + log.error("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) self.close(exception=error) raise error @@ -291,7 +300,7 @@ def _build_connection(self, is_reconnect=False): log.info("EventHubConsumer authentication timed out. Attempting reconnect.") return False except Exception as e: - log.error("Unexpected error occurred (%r). 
Shutting down.", e) error = EventHubError("EventHubConsumer reconnect failed: {}".format(e)) self.close(exception=error) raise error @@ -299,42 +308,6 @@ def _build_connection(self, is_reconnect=False): def _reconnect(self): return self._build_connection(is_reconnect=True) - def close(self, exception=None): - # type:(Exception) -> None - """ - Close down the handler. If the handler has already closed, - this will be a no op. An optional exception can be passed in to - indicate that the handler was shutdown due to error. - - :param exception: An optional exception if the handler is closing - due to an error. - :type exception: Exception - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_receiver_close] - :end-before: [END eventhub_client_receiver_close] - :language: python - :dedent: 4 - :caption: Close down the handler. - - """ - if self.messages_iter: - self.messages_iter.close() - self.messages_iter = None - self.running = False - if self.error: - return - if isinstance(exception, errors.LinkRedirect): - self.redirected = exception - elif isinstance(exception, EventHubError): - self.error = exception - elif exception: - self.error = EventHubError(str(exception)) - else: - self.error = EventHubError("This receive handler is now closed.") - self._handler.close() - @property def queue_size(self): # type:() -> int @@ -359,10 +332,8 @@ def receive(self, max_batch_size=None, timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. :type max_batch_size: int - :param timeout: The timeout time in seconds to receive a batch of events - from an Event Hub. Results will be returned after timeout. If combined - with max_batch_size, it will return after either the count of received events - reaches the max_batch_size or the operation has timed out. 
+ :param timeout: The maximum wait time to build up the requested message count for the batch. + If not specified, the default wait time specified when the consumer was created will be used. :type timeout: float :rtype: list[~azure.eventhub.common.EventData] @@ -442,13 +413,48 @@ def receive(self, max_batch_size=None, timeout=None): raise TimeoutError(str(shutdown), shutdown) except KeyboardInterrupt: log.info("EventHubConsumer stops due to keyboard interrupt") - print("EventHubConsumer stopped") self.close() raise except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) + log.error("Unexpected error occurred (%r). Shutting down.", e) error = EventHubError("Receive failed: {}".format(e)) self.close(exception=error) raise error + def close(self, exception=None): + # type:(Exception) -> None + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_receiver_close] + :end-before: [END eventhub_client_receiver_close] + :language: python + :dedent: 4 + :caption: Close down the handler. 
+ + """ + if self.messages_iter: + self.messages_iter.close() + self.messages_iter = None + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This receive handler is now closed.") + self._handler.close() + next = __next__ # for python2.7 diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index a93d052af4fe..5921c1b6e7b1 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -2,9 +2,10 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- - -from uamqp import types, constants, errors import six + +from uamqp import constants, errors + from azure.core.exceptions import AzureError _NO_RETRY_ERRORS = ( diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index b3b26293a44f..f06471cd078b 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -22,13 +22,17 @@ class EventHubProducer(object): """ - Implements a EventHubProducer. + A producer responsible for transmitting EventData to a specific Event Hub, + grouped together in batches. Depending on the options specified at creation, the producer may + be created to allow event data to be automatically routed to an available partition or specific + to a partition. 
""" def __init__(self, client, target, partition=None, send_timeout=60, keep_alive=None, auto_reconnect=True): """ - Instantiate an EventHubProducer. + Instantiate an EventHubProducer. EventHubProducer should be instantiated by calling the `create_producer` method + in EventHubClient. :param client: The parent EventHubClient. :type client: ~azure.eventhub.client.EventHubClient. @@ -190,39 +194,6 @@ def _build_connection(self, is_reconnect=False): def _reconnect(self): return self._build_connection(is_reconnect=True) - def close(self, exception=None): - # type:(Exception) -> None - """ - Close down the handler. If the handler has already closed, - this will be a no op. An optional exception can be passed in to - indicate that the handler was shutdown due to error. - - :param exception: An optional exception if the handler is closing - due to an error. - :type exception: Exception - - Example: - .. literalinclude:: ../examples/test_examples_eventhub.py - :start-after: [START eventhub_client_sender_close] - :end-before: [END eventhub_client_sender_close] - :language: python - :dedent: 4 - :caption: Close down the handler. - - """ - self.running = False - if self.error: - return - if isinstance(exception, errors.LinkRedirect): - self.redirected = exception - elif isinstance(exception, EventHubError): - self.error = exception - elif exception: - self.error = EventHubError(str(exception)) - else: - self.error = EventHubError("This send handler is now closed.") - self._handler.close() - def _send_event_data(self): self._open() max_retries = self.client.config.max_retries @@ -310,6 +281,23 @@ def _set_partition_key(event_datas, partition_key): ed._set_partition_key(partition_key) yield ed + def _on_outcome(self, outcome, condition): + """ + Called when the outcome is received for a delivery. + + :param outcome: The outcome of the message delivery - success or failure. + :type outcome: ~uamqp.constants.MessageSendResult + :param condition: Detail information of the outcome. 
+ + """ + self._outcome = outcome + self._condition = condition + + @staticmethod + def _error(outcome, condition): + if outcome != constants.MessageSendResult.Ok: + raise condition + def send(self, event_data, partition_key=None): # type:(Union[EventData, Union[List[EventData], Iterator[EventData], Generator[EventData]]], Union[str, bytes]) -> None """ @@ -348,19 +336,35 @@ def send(self, event_data, partition_key=None): self.unsent_events = [wrapper_event_data.message] self._send_event_data() - def _on_outcome(self, outcome, condition): + def close(self, exception=None): + # type:(Exception) -> None """ - Called when the outcome is received for a delivery. + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. - :param outcome: The outcome of the message delivery - success or failure. - :type outcome: ~uamqp.constants.MessageSendResult - :param condition: Detail information of the outcome. + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception - """ - self._outcome = outcome - self._condition = condition + Example: + .. literalinclude:: ../examples/test_examples_eventhub.py + :start-after: [START eventhub_client_sender_close] + :end-before: [END eventhub_client_sender_close] + :language: python + :dedent: 4 + :caption: Close down the handler. 
- @staticmethod - def _error(outcome, condition): - if outcome != constants.MessageSendResult.Ok: - raise condition + """ + self.running = False + if self.error: + return + if isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This send handler is now closed.") + self._handler.close() From ce8b0c98978df8de81baf8df4777db4af00c005a Mon Sep 17 00:00:00 2001 From: yijxie Date: Sun, 23 Jun 2019 20:27:34 -0700 Subject: [PATCH 41/54] code review changes --- .../azure/eventhub/aio/client_async.py | 8 +++++-- .../azure/eventhub/aio/consumer_async.py | 16 +++++++------ .../azure/eventhub/aio/producer_async.py | 24 ++++++++++--------- .../azure-eventhubs/azure/eventhub/client.py | 8 +++++-- .../azure/eventhub/client_abstract.py | 6 ++--- .../azure/eventhub/configuration.py | 2 +- .../azure/eventhub/consumer.py | 15 ++++++------ .../azure-eventhubs/azure/eventhub/error.py | 11 --------- .../azure/eventhub/producer.py | 22 +++++++++-------- .../tests/asynctests/test_send_async.py | 2 +- .../azure-eventhubs/tests/test_send.py | 2 +- 11 files changed, 60 insertions(+), 56 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py index cdcba824ad95..94a56d659d4a 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/client_async.py @@ -14,6 +14,7 @@ AMQPClientAsync, errors, ) +from uamqp import compat from azure.eventhub.error import ConnectError from azure.eventhub.common import parse_sas_token, EventPosition, EventHubSharedKeyCredential, EventHubSASTokenCredential @@ -102,7 +103,7 @@ async def _management_request(self, mgmt_msg, op_type): status_code_field=b'status-code', description_fields=b'status-description') 
return response - except (errors.AMQPConnectionError, errors.TokenAuthFailure) as failure: + except (errors.AMQPConnectionError, errors.TokenAuthFailure, compat.TimeoutException) as failure: if connect_count >= self.config.max_retries: err = ConnectError( "Can not connect to EventHubs or get management info from the service. " @@ -125,6 +126,7 @@ async def get_properties(self): -'partition_ids' :rtype: dict + :raises: ~azure.eventhub.ConnectError """ mgmt_msg = Message(application_properties={'name': self.eh_name}) response = await self._management_request(mgmt_msg, op_type=b'com.microsoft:eventhub') @@ -142,6 +144,7 @@ async def get_partition_ids(self): Get partition ids of the specified EventHub async. :rtype: list[str] + :raises: ~azure.eventhub.ConnectError """ return (await self.get_properties())['partition_ids'] @@ -162,6 +165,7 @@ async def get_partition_properties(self, partition): :param partition: The target partition id. :type partition: str :rtype: dict + :raises: ~azure.eventhub.ConnectError """ mgmt_msg = Message(application_properties={'name': self.eh_name, 'partition': partition}) @@ -187,7 +191,7 @@ def create_consumer( Create an async consumer to the client for a particular consumer group and partition. :param consumer_group: The name of the consumer group this consumer is associated with. - Events are read in the context of this group. + Events are read in the context of this group. The default consumer_group for an event hub is "$Default". :type consumer_group: str :param partition_id: The identifier of the Event Hub partition from which events will be received. 
:type partition_id: str diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 3ff10b64ed04..03f237da7026 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -154,12 +154,12 @@ async def __anext__(self): else: log.info("EventHubConsumer timed out. Shutting down.") await self.close(shutdown) - raise TimeoutError(str(shutdown), shutdown) + raise ConnectionLostError(str(shutdown), shutdown) except StopAsyncIteration: raise except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Receive failed: {}".format(e)) + log.error("Unexpected error occurred (%r). Shutting down.", e) + error = EventHubError("Receive failed: {}".format(e), e) await self.close(exception=error) raise error @@ -282,8 +282,8 @@ async def _build_connection(self, is_reconnect=False): # pylint: disable=too-ma log.info("EventHubConsumer authentication timed out. Attempting reconnect.") return False except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventHubConsumer reconnect failed: {}".format(e)) + log.error("Unexpected error occurred when building connection (%r). Shutting down.", e) + error = EventHubError("Unexpected error occurred when building connection", e) await self.close(exception=error) raise error @@ -320,6 +320,8 @@ async def receive(self, max_batch_size=None, timeout=None): If not specified, the default wait time specified when the consumer was created will be used. :type timeout: float :rtype: list[~azure.eventhub.common.EventData] + :raises: ~azure.eventhub.AuthenticationError, ~azure.eventhub.ConnectError, ~azure.eventhub.ConnectionLostError, + ~azure.eventhub.EventHubError Example: .. 
literalinclude:: ../examples/async_examples/test_examples_eventhub_async.py @@ -394,10 +396,10 @@ async def receive(self, max_batch_size=None, timeout=None): else: log.info("EventHubConsumer timed out. Shutting down.") await self.close(shutdown) - raise TimeoutError(str(shutdown), shutdown) + raise ConnectionLostError(str(shutdown), shutdown) except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Receive failed: {}".format(e)) + error = EventHubError("Receive failed: {}".format(e), e) await self.close(exception=error) raise error diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py index 169ad3a3e702..aef8dc50ff02 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/producer_async.py @@ -5,6 +5,7 @@ import uuid import asyncio import logging +from typing import Iterator, Generator, List, Union from uamqp import constants, errors, compat from uamqp import SendClientAsync @@ -187,8 +188,8 @@ async def _build_connection(self, is_reconnect=False): log.info("EventHubProducer authentication timed out. Attempting reconnect.") return False except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventHubProducer Reconnect failed: {}".format(e)) + log.info("Unexpected error occurred when building connection (%r). 
Shutting down.", e) + error = EventHubError("Unexpected error occurred when building connection", e) await self.close(exception=error) raise error @@ -217,7 +218,7 @@ async def _send_event_data(self): errors.MessageContentTooLarge) as msg_error: raise EventDataError(str(msg_error), msg_error) except errors.MessageException as failed: - log.info("Send event data error (%r)", failed) + log.error("Send event data error (%r)", failed) error = EventDataSendError(str(failed), failed) await self.close(exception=error) raise error @@ -264,10 +265,10 @@ async def _send_event_data(self): else: log.info("EventHubProducer timed out. Shutting down.") await self.close(shutdown) - raise TimeoutError(str(shutdown), shutdown) + raise ConnectionLostError(str(shutdown), shutdown) except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Send failed: {}".format(e)) + error = EventHubError("Send failed: {}".format(e), e) await self.close(exception=error) raise error @@ -301,18 +302,18 @@ def _set_partition_key(event_datas, partition_key): yield ed async def send(self, event_data, partition_key=None): - # type:(List[EventData], Union[str, bytes]) -> None + # type:(Union[EventData, Union[List[EventData], Iterator[EventData], Generator[EventData]]], Union[str, bytes]) -> None """ Sends an event data and blocks until acknowledgement is received or operation times out. - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData + :param event_data: The event to be sent. It can be an EventData object, or iterable of EventData objects + :type event_data: ~azure.eventhub.common.EventData, Iterator, Generator, list :param partition_key: With the given partition_key, event data will land to a particular partition of the Event Hub decided by the service. :type partition_key: str - :raises: ~azure.eventhub.common.EventHubError if the message fails to - send. 
+ :raises: ~azure.eventhub.AuthenticationError, ~azure.eventhub.ConnectError, ~azure.eventhub.ConnectionLostError, + ~azure.eventhub.EventDataError, ~azure.eventhub.EventDataSendError, ~azure.eventhub.EventHubError :return: None :rtype: None @@ -331,8 +332,9 @@ async def send(self, event_data, partition_key=None): event_data._set_partition_key(partition_key) wrapper_event_data = event_data else: + event_data_with_pk = self._set_partition_key(event_data, partition_key) wrapper_event_data = _BatchSendEventData( - self._set_partition_key(event_data, partition_key), + event_data_with_pk, partition_key=partition_key) if partition_key else _BatchSendEventData(event_data) wrapper_event_data.message.on_send_complete = self._on_outcome self.unsent_events = [wrapper_event_data.message] diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py index c2dc694bbe9b..f4e0afa2c2fa 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client.py @@ -19,6 +19,7 @@ from uamqp import authentication from uamqp import constants from uamqp import errors +from uamqp import compat from azure.eventhub.producer import EventHubProducer from azure.eventhub.consumer import EventHubConsumer @@ -109,7 +110,7 @@ def _management_request(self, mgmt_msg, op_type): status_code_field=b'status-code', description_fields=b'status-description') return response - except (errors.AMQPConnectionError, errors.TokenAuthFailure) as failure: + except (errors.AMQPConnectionError, errors.TokenAuthFailure, compat.TimeoutException) as failure: if connect_count >= self.config.max_retries: err = ConnectError( "Can not connect to EventHubs or get management info from the service. 
" @@ -132,6 +133,7 @@ def get_properties(self): -'partition_ids' :rtype: dict + :raises: ~azure.eventhub.ConnectError """ mgmt_msg = Message(application_properties={'name': self.eh_name}) response = self._management_request(mgmt_msg, op_type=b'com.microsoft:eventhub') @@ -149,6 +151,7 @@ def get_partition_ids(self): Get partition ids of the specified EventHub. :rtype: list[str] + :raises: ~azure.eventhub.ConnectError """ return self.get_properties()['partition_ids'] @@ -169,6 +172,7 @@ def get_partition_properties(self, partition): :param partition: The target partition id. :type partition: str :rtype: dict + :raises: ~azure.eventhub.ConnectError """ mgmt_msg = Message(application_properties={'name': self.eh_name, 'partition': partition}) @@ -195,7 +199,7 @@ def create_consumer( Create a consumer to the client for a particular consumer group and partition. :param consumer_group: The name of the consumer group this consumer is associated with. - Events are read in the context of this group. + Events are read in the context of this group. The default consumer_group for an event hub is "$Default". :type consumer_group: str :param partition_id: The identifier of the Event Hub partition from which events will be received. 
:type partition_id: str diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py index dc041269c5e5..4acde2e87beb 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/client_abstract.py @@ -25,7 +25,7 @@ from typing import Union from azure.eventhub import __version__ -from azure.eventhub.configuration import Configuration +from azure.eventhub.configuration import _Configuration from .common import EventHubSharedKeyCredential, EventHubSASTokenCredential, _Address log = logging.getLogger(__name__) @@ -155,7 +155,7 @@ def __init__(self, host, event_hub_path, credential, **kwargs): self.mgmt_target = "amqps://{}/{}".format(self.host, self.eh_name) self.auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) self.get_auth = functools.partial(self._create_auth) - self.config = Configuration(**kwargs) + self.config = _Configuration(**kwargs) self.debug = self.config.network_tracing log.info("%r: Created the Event Hub client", self.container_id) @@ -222,7 +222,7 @@ def _process_redirect_uri(self, redirect): def from_connection_string(cls, conn_str, event_hub_path=None, **kwargs): """Create an EventHubClient from an EventHub/IotHub connection string. - :param conn_str: The connection string. + :param conn_str: The connection string of an eventhub or IoT hub :type conn_str: str :param event_hub_path: The path of the specific Event Hub to connect the client to, if the EntityName is not included in the connection string. 
diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py index b3747a0ef581..27eb649628ec 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/configuration.py @@ -5,7 +5,7 @@ from uamqp.constants import TransportType -class Configuration(object): +class _Configuration(object): def __init__(self, **kwargs): self.user_agent = kwargs.get("user_agent") self.max_retries = kwargs.get("max_retries", 3) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index bcc3c7e0b0c3..1a87dc03cde7 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -152,7 +152,7 @@ def __next__(self): else: log.info("EventHubConsumer timed out. Shutting down.") self.close(shutdown) - raise TimeoutError(str(shutdown), shutdown) + raise ConnectionLostError(str(shutdown), shutdown) except StopIteration: raise except KeyboardInterrupt: @@ -161,7 +161,7 @@ def __next__(self): raise except Exception as e: log.error("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Receive failed: {}".format(e)) + error = EventHubError("Receive failed: {}".format(e), e) self.close(exception=error) raise error @@ -300,8 +300,8 @@ def _build_connection(self, is_reconnect=False): log.info("EventHubConsumer authentication timed out. Attempting reconnect.") return False except Exception as e: - log.error("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventHubConsumer reconnect failed: {}".format(e)) + log.error("Unexpected error occurred when building connection (%r). 
Shutting down.", e) + error = EventHubError("Unexpected error occurred when building connection", e) self.close(exception=error) raise error @@ -336,7 +336,8 @@ def receive(self, max_batch_size=None, timeout=None): If not specified, the default wait time specified when the consumer was created will be used. :type timeout: float :rtype: list[~azure.eventhub.common.EventData] - + :raises: ~azure.eventhub.AuthenticationError, ~azure.eventhub.ConnectError, ~azure.eventhub.ConnectionLostError, + ~azure.eventhub.EventHubError Example: .. literalinclude:: ../examples/test_examples_eventhub.py :start-after: [START eventhub_client_sync_receive] @@ -410,14 +411,14 @@ def receive(self, max_batch_size=None, timeout=None): else: log.info("EventHubConsumer timed out. Shutting down.") self.close(shutdown) - raise TimeoutError(str(shutdown), shutdown) + raise ConnectionLostError(str(shutdown), shutdown) except KeyboardInterrupt: log.info("EventHubConsumer stops due to keyboard interrupt") self.close() raise except Exception as e: log.error("Unexpected error occurred (%r). 
Shutting down.", e) - error = EventHubError("Receive failed: {}".format(e)) + error = EventHubError("Receive failed: {}".format(e), e) self.close(exception=error) raise error diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py index 5921c1b6e7b1..235b50e44298 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/error.py @@ -58,8 +58,6 @@ def __init__(self, message, details=None): self.error = None self.message = message self.details = details - if isinstance(message, constants.MessageSendResult): - self.message = "Message send failed with result: {}".format(message) if details and isinstance(details, Exception): try: condition = details.condition.value.decode('UTF-8') @@ -131,12 +129,3 @@ class EventDataSendError(EventHubError): """ pass - -''' -class ConnectionTimeoutError(ConnectError): - """Time out when accessing event hub service - Should retry? - - """ -''' - diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py index f06471cd078b..3f95b7be08c3 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/producer.py @@ -186,8 +186,8 @@ def _build_connection(self, is_reconnect=False): log.info("EventHubProducer authentication timed out. Attempting reconnect.") return False except Exception as e: - log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("EventHubProducer failed to connect: {}".format(e)) + log.info("Unexpected error occurred when building connection (%r). 
Shutting down.", e) + error = EventHubError("Unexpected error occurred when building connection", e) self.close(exception=error) raise error @@ -216,7 +216,7 @@ def _send_event_data(self): errors.MessageContentTooLarge) as msg_error: raise EventDataError(str(msg_error), msg_error) except errors.MessageException as failed: - log.info("Send event data error (%r)", failed) + log.error("Send event data error (%r)", failed) error = EventDataSendError(str(failed), failed) self.close(exception=error) raise error @@ -263,10 +263,10 @@ def _send_event_data(self): else: log.info("EventHubProducer timed out. Shutting down.") self.close(shutdown) - raise TimeoutError(str(shutdown), shutdown) + raise ConnectionLostError(str(shutdown), shutdown) except Exception as e: log.info("Unexpected error occurred (%r). Shutting down.", e) - error = EventHubError("Send failed: {}".format(e)) + error = EventHubError("Send failed: {}".format(e), e) self.close(exception=error) raise error @@ -304,13 +304,14 @@ def send(self, event_data, partition_key=None): Sends an event data and blocks until acknowledgement is received or operation times out. - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.common.EventData + :param event_data: The event to be sent. It can be an EventData object, or iterable of EventData objects + :type event_data: ~azure.eventhub.common.EventData, Iterator, Generator, list :param partition_key: With the given partition_key, event data will land to a particular partition of the Event Hub decided by the service. :type partition_key: str - :raises: ~azure.eventhub.common.EventHubError if the message fails to - send. 
+ :raises: ~azure.eventhub.AuthenticationError, ~azure.eventhub.ConnectError, ~azure.eventhub.ConnectionLostError, + ~azure.eventhub.EventDataError, ~azure.eventhub.EventDataSendError, ~azure.eventhub.EventHubError + :return: None :rtype: None @@ -329,8 +330,9 @@ def send(self, event_data, partition_key=None): event_data._set_partition_key(partition_key) wrapper_event_data = event_data else: + event_data_with_pk = self._set_partition_key(event_data, partition_key) wrapper_event_data = _BatchSendEventData( - self._set_partition_key(event_data, partition_key), + event_data_with_pk, partition_key=partition_key) if partition_key else _BatchSendEventData(event_data) wrapper_event_data.message.on_send_complete = self._on_outcome self.unsent_events = [wrapper_event_data.message] diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py index f969fc533e36..3d5fb70601ea 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_send_async.py @@ -125,7 +125,7 @@ async def test_send_non_ascii_async(connstr_receivers): async with sender: await sender.send(EventData("é,è,à,ù,â,ê,î,ô,û")) await sender.send(EventData(json.dumps({"foo": "漢字"}))) - + await asyncio.sleep(1) partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 2 assert partition_0[0].body_as_str() == "é,è,à,ù,â,ê,î,ô,û" diff --git a/sdk/eventhub/azure-eventhubs/tests/test_send.py b/sdk/eventhub/azure-eventhubs/tests/test_send.py index 222c85247f9a..f50ac702fb52 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_send.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_send.py @@ -135,7 +135,7 @@ def test_send_non_ascii(connstr_receivers): with sender: sender.send(EventData(u"é,è,à,ù,â,ê,î,ô,û")) sender.send(EventData(json.dumps({"foo": u"漢字"}))) - + time.sleep(1) partition_0 = receivers[0].receive(timeout=2) assert len(partition_0) == 2 
assert partition_0[0].body_as_str() == u"é,è,à,ù,â,ê,î,ô,û" From 8786b9a1cf77a3d61fadd478bb25377f1bad866c Mon Sep 17 00:00:00 2001 From: yijxie Date: Sun, 23 Jun 2019 20:48:34 -0700 Subject: [PATCH 42/54] Add python-dateutil in shared_requirements --- shared_requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/shared_requirements.txt b/shared_requirements.txt index 87b583e26192..46eb35bb0be5 100644 --- a/shared_requirements.txt +++ b/shared_requirements.txt @@ -95,4 +95,5 @@ uamqp~=1.2.0 enum34>=1.0.4 certifi>=2017.4.17 aiohttp>=3.0 -aiodns>=2.0 \ No newline at end of file +aiodns>=2.0 +python-dateutil>=2.8.0 \ No newline at end of file From 041e36b5dc7f6b2e651561c49c6582f9dc9bab86 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sun, 23 Jun 2019 20:59:26 -0700 Subject: [PATCH 43/54] Add aad credential env var to tests.yml --- sdk/eventhub/tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sdk/eventhub/tests.yml b/sdk/eventhub/tests.yml index 96519158a5c9..113900c116d3 100644 --- a/sdk/eventhub/tests.yml +++ b/sdk/eventhub/tests.yml @@ -14,3 +14,6 @@ jobs: EVENT_HUB_NAMESPACE: $(python-eh-livetest-event-hub-namespace) IOTHUB_CONNECTION_STR: $(python-eh-livetest-event-hub-iothub-connection-str) IOTHUB_DEVICE: $(python-eh-livetest-event-hub-iothub-device) + AAD_CLIENT_ID: $(python-eh-livetest-event-hub-aad-client-id) + AAD_TENANT_ID: $(python-eh-livetest-event-hub-aad-tenant-id) + AAD_SECRET: $(python-eh-livetest-event-hub-aad-secret) From 9a4c14c3d33653e7294fef883a033557bb1ad031 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sun, 23 Jun 2019 22:42:46 -0700 Subject: [PATCH 44/54] Change example code assertion for parallel running --- .../examples/async_examples/test_examples_eventhub_async.py | 2 +- sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py 
b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py index c22e602781df..048f5af1623c 100644 --- a/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py +++ b/sdk/eventhub/azure-eventhubs/examples/async_examples/test_examples_eventhub_async.py @@ -64,7 +64,7 @@ async def test_example_eventhub_async_send_and_receive(live_eventhub_config): for event_data in received: logger.info("Message received:{}".format(event_data.body_as_str())) # [END eventhub_client_async_receive] - assert len(received) == 1 + assert len(received) > 0 assert received[0].body_as_str() == "A single event" assert list(received[-1].body)[0] == b"A single event" diff --git a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py index 5231a76c8a87..d8483dc6c032 100644 --- a/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py +++ b/sdk/eventhub/azure-eventhubs/examples/test_examples_eventhub.py @@ -100,7 +100,7 @@ def test_example_eventhub_sync_send_and_receive(live_eventhub_config): for event_data in received: logger.info("Message received:{}".format(event_data.body_as_str())) # [END eventhub_client_sync_receive] - assert len(received) == 1 + assert len(received) > 0 assert received[0].body_as_str() == "A single event" assert list(received[-1].body)[0] == b"A single event" finally: From 5abdcd780cfb2085472d73c8d57fff120bbbb322 Mon Sep 17 00:00:00 2001 From: yijxie Date: Sun, 23 Jun 2019 22:43:50 -0700 Subject: [PATCH 45/54] Enable iothub receive test case --- sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index ac5787b6b12e..5bec9b75e6b0 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ 
b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -13,13 +13,12 @@ @pytest.mark.liveTest def test_iothub_receive_sync(iot_connection_str, device_id): - pytest.skip("current code will cause ErrorCodes.LinkRedirect") client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') receiver._open() try: partitions = client.get_properties() - assert partitions["partition_ids"] == ["0", "1", "2", "3"] + assert len(partitions["partition_ids"]) >= 2 received = receiver.receive(timeout=5) assert len(received) == 0 finally: From c0990d281f7dd6b703bb6f6622faff1ec6703a63 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 24 Jun 2019 00:03:33 -0700 Subject: [PATCH 46/54] Revert "Enable iothub receive test case" This reverts commit 5abdcd780cfb2085472d73c8d57fff120bbbb322. --- sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py index 5bec9b75e6b0..ac5787b6b12e 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_iothub_receive.py @@ -13,12 +13,13 @@ @pytest.mark.liveTest def test_iothub_receive_sync(iot_connection_str, device_id): + pytest.skip("current code will cause ErrorCodes.LinkRedirect") client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False) receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events') receiver._open() try: partitions = client.get_properties() - assert len(partitions["partition_ids"]) >= 2 + assert partitions["partition_ids"] == ["0", "1", "2", "3"] received = receiver.receive(timeout=5) assert len(received) 
== 0 finally: From 65bfec5c7876331284098ed5a9e99a36dddcfe8a Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 24 Jun 2019 00:09:53 -0700 Subject: [PATCH 47/54] fix auth test error --- .../azure-eventhubs/tests/asynctests/test_auth_async.py | 2 +- sdk/eventhub/azure-eventhubs/tests/test_auth.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py index 7759d9643394..80d3fb57c7ae 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_auth_async.py @@ -27,7 +27,7 @@ async def test_client_secret_credential_async(aad_credential, live_eventhub): credential=credential, user_agent='customized information') sender = client.create_producer(partition_id='0') - receiver = client.create_consumer(consumer_group="$default", partition_id='0', event_position=EventPosition.latest()) + receiver = client.create_consumer(consumer_group="$default", partition_id='0', event_position=EventPosition("@latest")) async with receiver: diff --git a/sdk/eventhub/azure-eventhubs/tests/test_auth.py b/sdk/eventhub/azure-eventhubs/tests/test_auth.py index 5a1a73875483..d5871971a5b4 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_auth.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_auth.py @@ -23,7 +23,7 @@ def test_client_secret_credential(aad_credential, live_eventhub): credential=credential, user_agent='customized information') sender = client.create_producer(partition_id='0') - receiver = client.create_consumer(consumer_group="$default", partition_id='0', event_position=EventPosition.latest()) + receiver = client.create_consumer(consumer_group="$default", partition_id='0', event_position=EventPosition("@latest")) with receiver: received = receiver.receive(timeout=1) From 6d017e18309dd21417011f598c550b2232ca30ed Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 24 Jun 
2019 00:38:26 -0700 Subject: [PATCH 48/54] change offset.value to offset --- .../tests/asynctests/test_longrunning_receive_async.py | 2 +- sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py index ced71b804889..f0666094618c 100644 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/test_longrunning_receive_async.py @@ -75,7 +75,7 @@ async def pump(_pid, receiver, _args, _dl): _pid, total, batch[-1].sequence_number, - batch[-1].offset.value)) + batch[-1].offset)) print("{}: total received {}".format( _pid, total)) diff --git a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py index ed612292846d..bbd945d09e9b 100644 --- a/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py +++ b/sdk/eventhub/azure-eventhubs/tests/test_longrunning_receive.py @@ -74,7 +74,7 @@ def pump(receivers, duration): pid, total, batch[-1].sequence_number, - batch[-1].offset.value)) + batch[-1].offset)) print("Total received {}".format(total)) except Exception as e: print("EventHubConsumer failed: {}".format(e)) From 94c2eece8af6a2075753275faa1b79e7ceb68e31 Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 24 Jun 2019 10:18:07 -0700 Subject: [PATCH 49/54] Fix an eventposition problem --- .../azure-eventhubs/azure/eventhub/aio/consumer_async.py | 2 +- sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py index 03f237da7026..6cf020176d96 100644 --- 
a/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/aio/consumer_async.py @@ -350,7 +350,7 @@ async def receive(self, max_batch_size=None, timeout=None): timeout=timeout_ms) for message in message_batch: event_data = EventData(message=message) - self.offset = event_data.offset + self.offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch except errors.AuthenticationException as auth_error: diff --git a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py index 1a87dc03cde7..856c77d6fb65 100644 --- a/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py +++ b/sdk/eventhub/azure-eventhubs/azure/eventhub/consumer.py @@ -365,7 +365,7 @@ def receive(self, max_batch_size=None, timeout=None): timeout=timeout_ms) for message in message_batch: event_data = EventData(message=message) - self.offset = event_data.offset + self.offset = EventPosition(event_data.offset) data_batch.append(event_data) return data_batch except errors.AuthenticationException as auth_error: From 59e57b7134aed59bd1d0b668b3c09eca2d25776c Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 24 Jun 2019 11:07:30 -0700 Subject: [PATCH 50/54] Remove path append --- sdk/eventhub/azure-eventhubs/conftest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index cf1881f76ef7..ff93c54b96a5 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -19,7 +19,6 @@ collect_ignore.append("features") collect_ignore.append("examples/async_examples") else: - sys.path.append(os.path.join(os.path.dirname(__file__), "tests")) from tests.asynctests import MockEventProcessor from azure.eventprocessorhost import EventProcessorHost from azure.eventprocessorhost import EventHubPartitionPump From fe6459b4d6e40670b5e8511e1436dfb3b8392e7c Mon 
Sep 17 00:00:00 2001 From: scbedd <45376673+scbedd@users.noreply.github.com> Date: Mon, 24 Jun 2019 11:39:25 -0700 Subject: [PATCH 51/54] trying removing the module init py within tests --- sdk/eventhub/azure-eventhubs/conftest.py | 46 +++++++++++++++- .../azure-eventhubs/tests/__init__.py | 0 .../tests/asynctests/__init__.py | 54 ------------------- 3 files changed, 45 insertions(+), 55 deletions(-) delete mode 100644 sdk/eventhub/azure-eventhubs/tests/__init__.py delete mode 100644 sdk/eventhub/azure-eventhubs/tests/asynctests/__init__.py diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index ff93c54b96a5..1ce3b2181ed3 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -19,7 +19,51 @@ collect_ignore.append("features") collect_ignore.append("examples/async_examples") else: - from tests.asynctests import MockEventProcessor + from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor + + class MockEventProcessor(AbstractEventProcessor): + """ + Mock Implmentation of AbstractEventProcessor for testing + """ + def __init__(self, params=None): + """ + Init Event processor + """ + self.params = params + self._msg_counter = 0 + + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + """ + logging.info("Connection established {}".format(context.partition_id)) + + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + (Params) Context:Information about the partition + """ + logging.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( + reason, context.partition_id, context.offset, context.sequence_number)) + + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. 
+ This is where the real work of the event processor is done. + (Params) Context: Information about the partition, Messages: The events to be processed. + """ + logging.info("Events processed {} {}".format(context.partition_id, messages)) + await context.checkpoint_async() + + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. + EventProcessorHost will take care of recovering from the error and + continuing to pump messages,so no action is required from + (Params) Context: Information about the partition, Error: The error that occured. + """ + logging.error("Event Processor Error {!r}".format(error)) + from azure.eventprocessorhost import EventProcessorHost from azure.eventprocessorhost import EventHubPartitionPump from azure.eventprocessorhost import AzureStorageCheckpointLeaseManager diff --git a/sdk/eventhub/azure-eventhubs/tests/__init__.py b/sdk/eventhub/azure-eventhubs/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/__init__.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/__init__.py deleted file mode 100644 index dc00a0fcfcae..000000000000 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-#-------------------------------------------------------------------------- - -import asyncio -import logging - -from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor - - -class MockEventProcessor(AbstractEventProcessor): - """ - Mock Implmentation of AbstractEventProcessor for testing - """ - def __init__(self, params=None): - """ - Init Event processor - """ - self.params = params - self._msg_counter = 0 - - async def open_async(self, context): - """ - Called by processor host to initialize the event processor. - """ - logging.info("Connection established {}".format(context.partition_id)) - - async def close_async(self, context, reason): - """ - Called by processor host to indicate that the event processor is being stopped. - (Params) Context:Information about the partition - """ - logging.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( - reason, context.partition_id, context.offset, context.sequence_number)) - - async def process_events_async(self, context, messages): - """ - Called by the processor host when a batch of events has arrived. - This is where the real work of the event processor is done. - (Params) Context: Information about the partition, Messages: The events to be processed. - """ - logging.info("Events processed {} {}".format(context.partition_id, messages)) - await context.checkpoint_async() - - async def process_error_async(self, context, error): - """ - Called when the underlying client experiences an error while receiving. - EventProcessorHost will take care of recovering from the error and - continuing to pump messages,so no action is required from - (Params) Context: Information about the partition, Error: The error that occured. 
- """ - logging.error("Event Processor Error {!r}".format(error)) \ No newline at end of file From cbbbfc4ddf83777b63eb60e6e042e38661c7055b Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 24 Jun 2019 12:27:24 -0700 Subject: [PATCH 52/54] Separate MockEventProcessor to a different file --- sdk/eventhub/azure-eventhubs/conftest.py | 47 +--------------- .../azure-eventhubs/tests/__init__.py | 0 .../tests/asynctests/mock_event_processor.py | 55 +++++++++++++++++++ 3 files changed, 57 insertions(+), 45 deletions(-) create mode 100644 sdk/eventhub/azure-eventhubs/tests/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index 1ce3b2181ed3..71f7962176c5 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -19,51 +19,8 @@ collect_ignore.append("features") collect_ignore.append("examples/async_examples") else: - from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor - - class MockEventProcessor(AbstractEventProcessor): - """ - Mock Implmentation of AbstractEventProcessor for testing - """ - def __init__(self, params=None): - """ - Init Event processor - """ - self.params = params - self._msg_counter = 0 - - async def open_async(self, context): - """ - Called by processor host to initialize the event processor. - """ - logging.info("Connection established {}".format(context.partition_id)) - - async def close_async(self, context, reason): - """ - Called by processor host to indicate that the event processor is being stopped. 
- (Params) Context:Information about the partition - """ - logging.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( - reason, context.partition_id, context.offset, context.sequence_number)) - - async def process_events_async(self, context, messages): - """ - Called by the processor host when a batch of events has arrived. - This is where the real work of the event processor is done. - (Params) Context: Information about the partition, Messages: The events to be processed. - """ - logging.info("Events processed {} {}".format(context.partition_id, messages)) - await context.checkpoint_async() - - async def process_error_async(self, context, error): - """ - Called when the underlying client experiences an error while receiving. - EventProcessorHost will take care of recovering from the error and - continuing to pump messages,so no action is required from - (Params) Context: Information about the partition, Error: The error that occured. - """ - logging.error("Event Processor Error {!r}".format(error)) - + sys.path.append(os.path.join(os.path.dirname(__file__), "tests")) + from tests.asynctests.mock_event_processor import MockEventProcessor from azure.eventprocessorhost import EventProcessorHost from azure.eventprocessorhost import EventHubPartitionPump from azure.eventprocessorhost import AzureStorageCheckpointLeaseManager diff --git a/sdk/eventhub/azure-eventhubs/tests/__init__.py b/sdk/eventhub/azure-eventhubs/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py new file mode 100644 index 000000000000..e4dd5c75b0e0 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py @@ -0,0 +1,55 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import asyncio +import logging + +from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor + + +class MockEventProcessor(AbstractEventProcessor): + """ + Mock Implmentation of AbstractEventProcessor for testing + """ + + def __init__(self, params=None): + """ + Init Event processor + """ + self.params = params + self._msg_counter = 0 + + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + """ + logging.info("Connection established {}".format(context.partition_id)) + + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + (Params) Context:Information about the partition + """ + logging.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( + reason, context.partition_id, context.offset, context.sequence_number)) + + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. + This is where the real work of the event processor is done. + (Params) Context: Information about the partition, Messages: The events to be processed. + """ + logging.info("Events processed {} {}".format(context.partition_id, messages)) + await context.checkpoint_async() + + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. + EventProcessorHost will take care of recovering from the error and + continuing to pump messages,so no action is required from + (Params) Context: Information about the partition, Error: The error that occured. 
+ """ + logging.error("Event Processor Error {!r}".format(error)) \ No newline at end of file From 4b44f0c34f316ea1fdb6e28797094a795b4c66bb Mon Sep 17 00:00:00 2001 From: yijxie Date: Mon, 24 Jun 2019 12:31:50 -0700 Subject: [PATCH 53/54] remove tests to path --- sdk/eventhub/azure-eventhubs/conftest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index 71f7962176c5..c3f729d98862 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -19,7 +19,6 @@ collect_ignore.append("features") collect_ignore.append("examples/async_examples") else: - sys.path.append(os.path.join(os.path.dirname(__file__), "tests")) from tests.asynctests.mock_event_processor import MockEventProcessor from azure.eventprocessorhost import EventProcessorHost from azure.eventprocessorhost import EventHubPartitionPump From 41ae796e7515569fac08c231cca528c208fa6b84 Mon Sep 17 00:00:00 2001 From: scbedd <45376673+scbedd@users.noreply.github.com> Date: Mon, 24 Jun 2019 12:48:50 -0700 Subject: [PATCH 54/54] trying a run based on a nested conftest to establish the async fixtures without shattering on python 2.7 --- sdk/eventhub/azure-eventhubs/conftest.py | 24 ------ .../azure-eventhubs/tests/__init__.py | 0 .../tests/asynctests/conftest.py | 82 +++++++++++++++++++ .../tests/asynctests/mock_event_processor.py | 55 ------------- 4 files changed, 82 insertions(+), 79 deletions(-) delete mode 100644 sdk/eventhub/azure-eventhubs/tests/__init__.py create mode 100644 sdk/eventhub/azure-eventhubs/tests/asynctests/conftest.py delete mode 100644 sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py diff --git a/sdk/eventhub/azure-eventhubs/conftest.py b/sdk/eventhub/azure-eventhubs/conftest.py index c3f729d98862..6700b06a5afa 100644 --- a/sdk/eventhub/azure-eventhubs/conftest.py +++ b/sdk/eventhub/azure-eventhubs/conftest.py @@ -19,7 +19,6 @@ 
collect_ignore.append("features") collect_ignore.append("examples/async_examples") else: - from tests.asynctests.mock_event_processor import MockEventProcessor from azure.eventprocessorhost import EventProcessorHost from azure.eventprocessorhost import EventHubPartitionPump from azure.eventprocessorhost import AzureStorageCheckpointLeaseManager @@ -234,29 +233,6 @@ def storage_clm(eph): except: warnings.warn(UserWarning("storage container teardown failed")) - -@pytest.fixture() -def eph(): - try: - storage_clm = AzureStorageCheckpointLeaseManager( - os.environ['AZURE_STORAGE_ACCOUNT'], - os.environ['AZURE_STORAGE_ACCESS_KEY'], - "lease") - NAMESPACE = os.environ.get('EVENT_HUB_NAMESPACE') - EVENTHUB = os.environ.get('EVENT_HUB_NAME') - USER = os.environ.get('EVENT_HUB_SAS_POLICY') - KEY = os.environ.get('EVENT_HUB_SAS_KEY') - - eh_config = EventHubConfig(NAMESPACE, EVENTHUB, USER, KEY, consumer_group="$default") - host = EventProcessorHost( - MockEventProcessor, - eh_config, - storage_clm) - except KeyError: - pytest.skip("Live EventHub configuration not found.") - return host - - @pytest.fixture() def eh_partition_pump(eph): lease = AzureBlobLease() diff --git a/sdk/eventhub/azure-eventhubs/tests/__init__.py b/sdk/eventhub/azure-eventhubs/tests/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/conftest.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/conftest.py new file mode 100644 index 000000000000..7af6b48315e7 --- /dev/null +++ b/sdk/eventhub/azure-eventhubs/tests/asynctests/conftest.py @@ -0,0 +1,82 @@ +import sys +import pytest +import os + +if sys.version_info >= (3, 5): + import asyncio + import logging + + from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor + from azure.eventprocessorhost import EventProcessorHost + from azure.eventprocessorhost import EventHubPartitionPump + from azure.eventprocessorhost import 
AzureStorageCheckpointLeaseManager + from azure.eventprocessorhost import AzureBlobLease + from azure.eventprocessorhost import EventHubConfig + from azure.eventprocessorhost.lease import Lease + from azure.eventprocessorhost.partition_pump import PartitionPump + from azure.eventprocessorhost.partition_manager import PartitionManager + + class MockEventProcessor(AbstractEventProcessor): + """ + Mock Implmentation of AbstractEventProcessor for testing + """ + + def __init__(self, params=None): + """ + Init Event processor + """ + self.params = params + self._msg_counter = 0 + + async def open_async(self, context): + """ + Called by processor host to initialize the event processor. + """ + logging.info("Connection established {}".format(context.partition_id)) + + async def close_async(self, context, reason): + """ + Called by processor host to indicate that the event processor is being stopped. + (Params) Context:Information about the partition + """ + logging.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( + reason, context.partition_id, context.offset, context.sequence_number)) + + async def process_events_async(self, context, messages): + """ + Called by the processor host when a batch of events has arrived. + This is where the real work of the event processor is done. + (Params) Context: Information about the partition, Messages: The events to be processed. + """ + logging.info("Events processed {} {}".format(context.partition_id, messages)) + await context.checkpoint_async() + + async def process_error_async(self, context, error): + """ + Called when the underlying client experiences an error while receiving. + EventProcessorHost will take care of recovering from the error and + continuing to pump messages,so no action is required from + (Params) Context: Information about the partition, Error: The error that occured. 
+ """ + logging.error("Event Processor Error {!r}".format(error)) + +@pytest.fixture() +def eph(): + try: + storage_clm = AzureStorageCheckpointLeaseManager( + os.environ['AZURE_STORAGE_ACCOUNT'], + os.environ['AZURE_STORAGE_ACCESS_KEY'], + "lease") + NAMESPACE = os.environ.get('EVENT_HUB_NAMESPACE') + EVENTHUB = os.environ.get('EVENT_HUB_NAME') + USER = os.environ.get('EVENT_HUB_SAS_POLICY') + KEY = os.environ.get('EVENT_HUB_SAS_KEY') + + eh_config = EventHubConfig(NAMESPACE, EVENTHUB, USER, KEY, consumer_group="$default") + host = EventProcessorHost( + MockEventProcessor, + eh_config, + storage_clm) + except KeyError: + pytest.skip("Live EventHub configuration not found.") + return host \ No newline at end of file diff --git a/sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py b/sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py deleted file mode 100644 index e4dd5c75b0e0..000000000000 --- a/sdk/eventhub/azure-eventhubs/tests/asynctests/mock_event_processor.py +++ /dev/null @@ -1,55 +0,0 @@ -#------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -#-------------------------------------------------------------------------- - -import asyncio -import logging - -from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor - - -class MockEventProcessor(AbstractEventProcessor): - """ - Mock Implmentation of AbstractEventProcessor for testing - """ - - def __init__(self, params=None): - """ - Init Event processor - """ - self.params = params - self._msg_counter = 0 - - async def open_async(self, context): - """ - Called by processor host to initialize the event processor. 
- """ - logging.info("Connection established {}".format(context.partition_id)) - - async def close_async(self, context, reason): - """ - Called by processor host to indicate that the event processor is being stopped. - (Params) Context:Information about the partition - """ - logging.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format( - reason, context.partition_id, context.offset, context.sequence_number)) - - async def process_events_async(self, context, messages): - """ - Called by the processor host when a batch of events has arrived. - This is where the real work of the event processor is done. - (Params) Context: Information about the partition, Messages: The events to be processed. - """ - logging.info("Events processed {} {}".format(context.partition_id, messages)) - await context.checkpoint_async() - - async def process_error_async(self, context, error): - """ - Called when the underlying client experiences an error while receiving. - EventProcessorHost will take care of recovering from the error and - continuing to pump messages,so no action is required from - (Params) Context: Information about the partition, Error: The error that occured. - """ - logging.error("Event Processor Error {!r}".format(error)) \ No newline at end of file