From ed2df7b24c47f83d986bc759155e06473f2b1c71 Mon Sep 17 00:00:00 2001 From: annatisch Date: Mon, 25 Jun 2018 08:15:20 -0700 Subject: [PATCH 01/11] Started adding scenario tests --- features/eph.feature | 21 ++++++++++++ features/eventhub.feature | 67 ++++++++++++++++++++++++++++++++++++++ features/steps/eventhub.py | 13 ++++++++ 3 files changed, 101 insertions(+) create mode 100644 features/eph.feature create mode 100644 features/eventhub.feature create mode 100644 features/steps/eventhub.py diff --git a/features/eph.feature b/features/eph.feature new file mode 100644 index 0000000..2742eaf --- /dev/null +++ b/features/eph.feature @@ -0,0 +1,21 @@ +Feature: Exercising Event Processor Host + +# Scenario: EPH single host, generic scenario. + +# Scenario: EPH runs with listen only claims. + +# Scenario: Host runs idle for a while by managing sender to send in intervals. + +# Scenario: No sends at all, hosts will stay idle. + +# Scenario: Spawns multiple test processes consuming from the same event hub. + +# Scenario: Registers and unregisters hosts as part of the regular iteration to introduce excessive partition moves. + + Scenario: Registers and unregisters hosts as part of the regular iteration to introduce excessive partition moves. No sends in this scenario. + + Scenario: Runs EPH on 256 partition entity. + +# Scenario: Runs EPH on multiple consumer groups. + +# Scenario: Runs EPH with web sockets enabled. \ No newline at end of file diff --git a/features/eventhub.feature b/features/eventhub.feature new file mode 100644 index 0000000..db09254 --- /dev/null +++ b/features/eventhub.feature @@ -0,0 +1,67 @@ +Feature: Exercising EventHub SDK + +# Scenario: Just sends for 3 days, no receives. Focus on send failures only. + + @long-running + Scenario: Generic send and receive on client for 3 days. 
+ Given the EventHub SDK is installed + And an EventHub is created with credentials retrieved + When I send and receive messages for 72 hours + Then I should receive no errors + And I can shutdown the sender and receiver cleanly. + +# Scenario: Sender stays idle for 45 minutes and sends some number of messages after each idle duration. + +# Scenario: Sends on partition senders. + +# Scenario: Send and receive to/from a multiple consumer group entity. + +# Scenario: Sends and receives 246KB size messages. + + @long-running + Scenario: Runs on a 100TU namespace and saturates ingress. + Given the EventHub SDK is installed + And an EventHub with 100TU is created with credentials retrieved + When I send messages for 2 hours + Then I should achieve throughput of greater than 3600000 messages + And I should receive no errors + And I can shutdown the sender cleanly. + + @long-running + Scenario: Runs on a 100TU namespace and saturates ingress with partition senders for 3 days. + Given the EventHub SDK is installed + And an EventHub with 100TU is created with credentials retrieved + When I send messages to partitions for 2 hours + Then I should achieve throughput of greater than 1800000 messages to each partition + And I should receive no errors + And I can shutdown the sender cleanly. + +# Scenario: Sends and receives 1 byte size messages. + +# Scenario: Single clients parks 500 async sends. + +# Scenario: Sends a set of messages and keeps receiving same set of messages again and again. + +# Scenario: Receives with 60 minutes of receive timeout. + +# Scenario: Receives with 3 seconds of receive timeout. + +# Scenario: Recreates receivers at the beginning of each iteration. + +# Scenario: Recreates receivers with the last known sequence number at the beginning of each iteration. + +# Scenario: Uses epoch receivers. + +# Scenario: Introduces a short idle time after each receive attempt. We use 50 seconds of sleep here. + +# Scenario: Uses pump receivers to receive messages. 
+ +# Scenario: Sends messages with partition key set. + +# Scenario: Issues runtime information API calls as part of send and receive. + +# Scenario: Uses batch sender to send messages. + +# Scenario: Sends and receives by enabling web sockets over AMQP. + +# Scenario: Issues runtime information API calls over web sockets as part of send and receive. diff --git a/features/steps/eventhub.py b/features/steps/eventhub.py new file mode 100644 index 0000000..1e2341d --- /dev/null +++ b/features/steps/eventhub.py @@ -0,0 +1,13 @@ +from behave import * + +@given('we have behave installed') +def step_impl(context): + pass + +@when('we implement a test') +def step_impl(context): + assert True is not False + +@then('behave will test it for us!') +def step_impl(context): + assert context.failed is False \ No newline at end of file From e57776d016c08c6a454557290bda6a6aba830bee Mon Sep 17 00:00:00 2001 From: annatisch Date: Mon, 25 Jun 2018 10:18:46 -0700 Subject: [PATCH 02/11] More test scenarios --- .gitignore | 6 +++ azure/eventhub/__init__.py | 2 +- azure/eventprocessorhost/partition_context.py | 6 ++- azure/eventprocessorhost/partition_pump.py | 2 +- dev_requirements.txt | 3 +- features/eph.feature | 5 ++ features/eventhub.feature | 16 ++++-- features/steps/eventhub.py | 50 +++++++++++++++++-- features/steps/test_utils.py | 34 +++++++++++++ setup.py | 2 +- 10 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 features/steps/test_utils.py diff --git a/.gitignore b/.gitignore index 27e1a0e..ef97309 100644 --- a/.gitignore +++ b/.gitignore @@ -105,3 +105,9 @@ ENV/ .mypy_cache/ .pytest_cache/v/cache/lastfailed .pytest_cache/v/cache/nodeids + +# EventHub +azure/mgmt/ +azure/common/ +azure/profiles/ +features/steps/mgmt_settings_real.py diff --git a/azure/eventhub/__init__.py b/azure/eventhub/__init__.py index 8ad8448..072de4d 100644 --- a/azure/eventhub/__init__.py +++ b/azure/eventhub/__init__.py @@ -430,7 +430,7 @@ def on_message(self, event): Callback to 
process a received message and wrap it in EventData. Will also call a user supplied callback. :param event: The received message. - :type event: ~uamqp.Message + :type event: ~uamqp.message.Message :returns: ~azure.eventhub.EventData. """ event_data = EventData(message=event) diff --git a/azure/eventprocessorhost/partition_context.py b/azure/eventprocessorhost/partition_context.py index 4858adc..d4cfa19 100644 --- a/azure/eventprocessorhost/partition_context.py +++ b/azure/eventprocessorhost/partition_context.py @@ -88,6 +88,7 @@ def to_string(self): """ Returns the parition context in the following format: "PartitionContext({EventHubPath}{ConsumerGroupName}{PartitionId}{SequenceNumber})" + :returns: str """ return "PartitionContext({}{}{}{})".format(self.eh_path, @@ -97,9 +98,10 @@ def to_string(self): async def persist_checkpoint_async(self, checkpoint): """ - Persists the checkpoint + Persists the checkpoint. + :param checkpoint: The checkpoint to persist. - :type checkpoint: ~azure.eventprocessorhost.Checkpoint + :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint """ _logger.debug("PartitionPumpCheckpointStart {} {} {} {}".format( self.host.guid, checkpoint.partition_id, checkpoint.offset, checkpoint.sequence_number)) diff --git a/azure/eventprocessorhost/partition_pump.py b/azure/eventprocessorhost/partition_pump.py index 6caa981..1327463 100644 --- a/azure/eventprocessorhost/partition_pump.py +++ b/azure/eventprocessorhost/partition_pump.py @@ -43,7 +43,7 @@ def set_lease(self, new_lease): """ Sets a new partition lease to be processed by the pump :param lease: The lease to set. 
- :type lease: ~azure.eventprocessorhost.Lease + :type lease: ~azure.eventprocessorhost.lease.Lease """ if self.partition_context: self.partition_context.lease = new_lease diff --git a/dev_requirements.txt b/dev_requirements.txt index a6bbee0..3cbeb9a 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -2,4 +2,5 @@ pytest>=3.4.1 pytest-asyncio>=0.8.0 docutils>=0.14 pygments>=2.2.0 -pylint==1.8.4 \ No newline at end of file +pylint==1.8.4 +behave==1.2.6 \ No newline at end of file diff --git a/features/eph.feature b/features/eph.feature index 2742eaf..73fc8bc 100644 --- a/features/eph.feature +++ b/features/eph.feature @@ -1,3 +1,8 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + Feature: Exercising Event Processor Host # Scenario: EPH single host, generic scenario. diff --git a/features/eventhub.feature b/features/eventhub.feature index db09254..c96a998 100644 --- a/features/eventhub.feature +++ b/features/eventhub.feature @@ -1,3 +1,8 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + Feature: Exercising EventHub SDK # Scenario: Just sends for 3 days, no receives. Focus on send failures only. @@ -8,7 +13,8 @@ Feature: Exercising EventHub SDK And an EventHub is created with credentials retrieved When I send and receive messages for 72 hours Then I should receive no errors - And I can shutdown the sender and receiver cleanly. 
+ And I can shutdown the sender and receiver cleanly + And I remove the EventHub # Scenario: Sender stays idle for 45 minutes and sends some number of messages after each idle duration. @@ -25,16 +31,18 @@ Feature: Exercising EventHub SDK When I send messages for 2 hours Then I should achieve throughput of greater than 3600000 messages And I should receive no errors - And I can shutdown the sender cleanly. + And I can shutdown the sender cleanly + And I remove the EventHub @long-running Scenario: Runs on a 100TU namespace and saturates ingress with partition senders for 3 days. Given the EventHub SDK is installed And an EventHub with 100TU is created with credentials retrieved When I send messages to partitions for 2 hours - Then I should achieve throughput of greater than 1800000 messages to each partition + Then I should achieve throughput of greater than 1800000 messages from each partition And I should receive no errors - And I can shutdown the sender cleanly. + And I can shutdown the sender cleanly + And I remove the EventHub # Scenario: Sends and receives 1 byte size messages. diff --git a/features/steps/eventhub.py b/features/steps/eventhub.py index 1e2341d..b92bdc5 100644 --- a/features/steps/eventhub.py +++ b/features/steps/eventhub.py @@ -1,13 +1,55 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import asyncio +import uuid + from behave import * -@given('we have behave installed') +import test_utils + +@given('the EventHub SDK is installed') def step_impl(context): - pass + from azure import eventhub -@when('we implement a test') +@given('an EventHub is created with credentials retrieved') def step_impl(context): + #from mgmt_settings_real import get_credentials, SUBSCRIPTION_ID + #rg, mgmt_client = test_utils.create_mgmt_client(get_credentials(), SUBSCRIPTION_ID) + context.eh_config = test_utils.get_eventhub_config() + +@given('an EventHub with {properties} is created with credentials retrieved') +def step_impl(context, properties): + #from mgmt_settings_real import get_credentials, SUBSCRIPTION_ID + #rg, mgmt_client = test_utils.create_mgmt_client(get_credentials(), SUBSCRIPTION_ID) + context.eh_config = test_utils.get_eventhub_config() + +@when('I {clients} messages for {hours} hours') +def step_impl(context, clients, hours): + assert True is not False + +@when('I {clients} messages {destination} for {hours} hours') +def step_impl(context, clients, destination, hours): assert True is not False -@then('behave will test it for us!') +@then('I should receive no errors') +def step_impl(context): + assert context.failed is False + +@then('I can shutdown the {clients} cleanly') +def step_impl(context, clients): + assert context.failed is False + +@then('I should achieve throughput of greater than {total} messages') +def step_impl(context, total): + assert context.failed is False + +@then('I should achieve throughput of greater than {total} messages from {source}') +def step_impl(context, total, source): + assert context.failed is False + +@then('I remove the EventHub') def step_impl(context): assert context.failed is False \ No newline at end of file diff --git a/features/steps/test_utils.py b/features/steps/test_utils.py new file mode 100644 index 
0000000..31eb7b5
--- /dev/null
+++ b/features/steps/test_utils.py
@@ -0,0 +1,35 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import os
+import uuid
+
+def create_mgmt_client(credentials, subscription, location='westus'):
+    from azure.mgmt.resource import ResourceManagementClient
+    from azure.mgmt.eventhub import EventHubManagementClient
+
+    resource_client = ResourceManagementClient(credentials, subscription)
+    rg_name = 'pytest-{}'.format(uuid.uuid4())
+    resource_group = resource_client.resource_groups.create_or_update(
+        rg_name, {'location': location})
+
+    eh_client = EventHubManagementClient(credentials, subscription)
+    namespace = 'pytest-{}'.format(uuid.uuid4())
+    creator = eh_client.namespaces.create_or_update(
+        resource_group.name,
+        namespace)
+    creator.wait()
+    return resource_group, eh_client
+
+
+def get_eventhub_config():
+    config = {}
+    config['hostname'] = os.environ['EVENT_HUB_HOSTNAME']
+    config['event_hub'] = os.environ['EVENT_HUB_NAME']
+    config['key_name'] = os.environ['EVENT_HUB_SAS_POLICY']
+    config['access_key'] = os.environ['EVENT_HUB_SAS_KEY']
+    config['consumer_group'] = "$Default"
+    config['partition'] = "0"
+    return config
diff --git a/setup.py b/setup.py
index 66f5465..1cc16ce 100644
--- a/setup.py
+++ b/setup.py
@@ -55,7 +55,7 @@
         zip_safe=False,
         packages=find_packages(exclude=["examples", "tests"]),
         install_requires=[
-            'uamqp~=0.1.0rc1',
+            'uamqp==0.1.0rc1',
             'msrestazure~=0.4.11',
             'azure-common~=1.1',
             'azure-storage~=0.36.0'
From 684f1f3c004e8d8473683a30388a829ee8b78cd0 Mon Sep 17 00:00:00 2001
From: annatisch
Date: Mon, 25 Jun 2018 11:21:38 -0700
Subject: [PATCH 03/11] Better docstring formatting
---
 azure/eventhub/__init__.py | 77
+++++++++++++------ azure/eventhub/async/__init__.py | 36 +++++---- .../abstract_checkpoint_manager.py | 20 +++-- .../abstract_event_processor.py | 16 ++-- .../abstract_lease_manager.py | 47 +++++++---- azure/eventprocessorhost/azure_blob_lease.py | 12 +-- .../azure_storage_checkpoint_manager.py | 75 ++++++++++++++---- .../eventprocessorhost/cancellation_token.py | 4 +- azure/eventprocessorhost/checkpoint.py | 8 +- azure/eventprocessorhost/eh_config.py | 5 ++ azure/eventprocessorhost/eh_partition_pump.py | 21 ++--- azure/eventprocessorhost/eph.py | 19 ++--- azure/eventprocessorhost/lease.py | 11 ++- azure/eventprocessorhost/partition_context.py | 13 ++-- azure/eventprocessorhost/partition_manager.py | 58 ++++++++++---- azure/eventprocessorhost/partition_pump.py | 20 +++-- 16 files changed, 304 insertions(+), 138 deletions(-) diff --git a/azure/eventhub/__init__.py b/azure/eventhub/__init__.py index 072de4d..7892358 100644 --- a/azure/eventhub/__init__.py +++ b/azure/eventhub/__init__.py @@ -106,6 +106,7 @@ def __init__(self, address, username=None, password=None, debug=False): def from_connection_string(cls, conn_str, eventhub=None, **kwargs): """ Create an EventHubClient from a connection string. + :param conn_str: The connection string. :type conn_str: str :param eventhub: The name of the EventHub, if the EntityName is @@ -135,7 +136,7 @@ def _create_properties(self): # pylint: disable=no-self-use Format the properties with which to instantiate the connection. This acts like a user agent over HTTP. - :returns: dict + :rtype: dict """ properties = {} properties["product"] = "eventhub.python" @@ -146,7 +147,7 @@ def _create_properties(self): # pylint: disable=no-self-use def _create_connection(self): """ - Create a new ~uamqp.Connection instance that will be shared between all + Create a new ~uamqp.connection.Connection instance that will be shared between all Sender/Receiver clients. 
""" if not self.connection: @@ -179,7 +180,7 @@ def run(self): Run the EventHubClient in blocking mode. Opens the connection and starts running all Sender/Receiver clients. - :returns: ~azure.eventhub.EventHubClient + :rtype: ~azure.eventhub.EventHubClient """ log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) self._create_connection() @@ -205,7 +206,8 @@ def get_eventhub_info(self): -'created_at' -'partition_count' -'partition_ids' - :returns: dict + + :rtype: dict """ self._create_connection() eh_name = self.address.path.lstrip('/') @@ -237,6 +239,7 @@ def get_eventhub_info(self): def add_receiver(self, consumer_group, partition, offset=None, prefetch=300): """ Add a receiver to the client for a particular consumer group and partition. + :param consumer_group: The name of the consumer group. :type consumer_group: str :param partition: The ID of the partition. @@ -245,7 +248,7 @@ def add_receiver(self, consumer_group, partition, offset=None, prefetch=300): :type offset: ~azure.eventhub.Offset :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int - :returns: ~azure.eventhub.Receiver + :rtype: ~azure.eventhub.Receiver """ source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, self.address.path, consumer_group, partition) @@ -262,6 +265,7 @@ def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300): can connect to a partition at any given time - additional epoch receivers must have a higher epoch value or they will be rejected. If a 2nd epoch receiver has connected, the first will be closed. + :param consumer_group: The name of the consumer group. :type consumer_group: str :param partition: The ID of the partition. @@ -270,7 +274,7 @@ def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300): :type epoch: int :param prefetch: The message prefetch count of the receiver. Default is 300. 
:type prefetch: int - :returns: ~azure.eventhub.Receiver + :rtype: ~azure.eventhub.Receiver """ source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, self.address.path, consumer_group, partition) @@ -282,11 +286,12 @@ def add_sender(self, partition=None): """ Add a sender to the client to send ~azure.eventhub.EventData object to an EventHub. + :param partition: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via - round-robin + round-robin. :type parition: str - :returns: ~azure.eventhub.Sender + :rtype: ~azure.eventhub.Sender """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) handler = Sender(self, target, partition=partition) @@ -303,6 +308,7 @@ class Sender: def __init__(self, client, target, partition=None): """ Instantiate an EventHub event Sender client. + :param client: The parent EventHubClient. :type client: ~azure.eventhub.EventHubClient. :param target: The URI of the EventHub to send to. @@ -323,11 +329,13 @@ def send(self, event_data): """ Sends an event data and blocks until acknowledgement is received or operation times out. + :param event_data: The event to be sent. :type event_data: ~azure.eventhub.EventData :raises: ~azure.eventhub.EventHubError if the message fails to send. - :returns: The outcome of the message send ~uamqp.constants.MessageSendResult + :return: The outcome of the message send. + :rtype: ~uamqp.constants.MessageSendResult """ if event_data.partition_key and self.partition: raise ValueError("EventData partition key cannot be used with a partition sender.") @@ -344,6 +352,7 @@ def send(self, event_data): def transfer(self, event_data, callback=None): """ Transfers an event data and notifies the callback when the operation is done. + :param event_data: The event to be sent. :type event_data: ~azure.eventhub.EventData :param callback: Callback to be run once the message has been send. 
@@ -368,6 +377,7 @@ def wait(self): def _on_outcome(self, outcome, condition): """ Called when the outcome is received for a delivery. + :param outcome: The outcome of the message delivery - success or failure. :type outcome: ~uamqp.constants.MessageSendResult """ @@ -389,10 +399,11 @@ class Receiver: def __init__(self, client, source, prefetch=300, epoch=None): """ Instantiate a receiver. + :param client: The parent EventHubClient. :type client: ~azure.eventhub.EventHubClient :param source: The source EventHub from which to receive events. - :type source: ~uamqp.Source + :type source: ~uamqp.address.Source :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int @@ -418,7 +429,8 @@ def __init__(self, client, source, prefetch=300, epoch=None): def queue_size(self): """ The current size of the unprocessed message queue. - :returns: int + + :rtype: int """ # pylint: disable=protected-access if self._handler._received_messages: @@ -429,9 +441,10 @@ def on_message(self, event): """ Callback to process a received message and wrap it in EventData. Will also call a user supplied callback. + :param event: The received message. :type event: ~uamqp.message.Message - :returns: ~azure.eventhub.EventData. + :rtype: ~azure.eventhub.EventData. """ event_data = EventData(message=event) if self._callback: @@ -442,6 +455,7 @@ def on_message(self, event): def receive(self, max_batch_size=None, callback=None, timeout=None): """ Receive events from the EventHub. + :param max_batch_size: Receive a batch of events. Batch size will be up to the maximum specified, but will return as soon as service returns no new events. If combined with a timeout and no events are @@ -452,7 +466,7 @@ def receive(self, max_batch_size=None, callback=None, timeout=None): be a function that accepts a single argument - the event data. This callback will be run before the message is returned in the result generator. 
:type callback: func[~azure.eventhub.EventData] - :returns: list[~azure.eventhub.EventData] + :rtype: list[~azure.eventhub.EventData] """ try: timeout_ms = 1000 * timeout if timeout else 0 @@ -478,9 +492,10 @@ def receive(self, max_batch_size=None, callback=None, timeout=None): def selector(self, default): """ Create a selector for the current offset if it is set. + :param default: The fallback receive offset. :type default: ~azure.eventhub.Offset - :returns: ~azure.eventhub.Offset + :rtype: ~azure.eventhub.Offset """ if self.offset is not None: return Offset(self.offset).selector() @@ -490,7 +505,7 @@ def selector(self, default): class EventData(object): """ The EventData class is a holder of event content. - Acts as a wrapper to an ~uamqp.Message object. + Acts as a wrapper to an ~uamqp.message.Message object. """ PROP_SEQ_NUMBER = b"x-opt-sequence-number" @@ -501,13 +516,14 @@ class EventData(object): def __init__(self, body=None, batch=None, message=None): """ - Initialize EventData + Initialize EventData. + :param body: The data to send in a single message. :type body: str, bytes or list :param batch: A data generator to send batched messages. :type batch: Generator :param message: The received message. - :type message: ~uamqp.Message + :type message: ~uamqp.message.Message """ self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) self._annotations = {} @@ -536,7 +552,8 @@ def __init__(self, body=None, batch=None, message=None): def sequence_number(self): """ The sequence number of the event data object. - :returns: int + + :rtype: int """ return self._annotations.get(EventData.PROP_SEQ_NUMBER, None) @@ -544,7 +561,8 @@ def sequence_number(self): def offset(self): """ The offset of the event data object. - :returns: int + + :rtype: int """ try: return self._annotations[EventData.PROP_OFFSET].decode('UTF-8') @@ -555,7 +573,8 @@ def offset(self): def enqueued_time(self): """ The enqueued timestamp of the event data object. 
- :returns: datetime.datetime + + :rtype: datetime.datetime """ timestamp = self._annotations.get(EventData.PROP_TIMESTAMP, None) if timestamp: @@ -567,7 +586,8 @@ def device_id(self): """ The device ID of the event data object. This is only used for IoT Hub implementations. - :returns: bytes + + :rtype: bytes """ return self._annotations.get(EventData.PROP_DEVICE_ID, None) @@ -575,7 +595,8 @@ def device_id(self): def partition_key(self): """ The partition key of the event data object. - :returns: bytes + + :rtype: bytes """ try: return self._annotations[self._partition_key] @@ -586,6 +607,7 @@ def partition_key(self): def partition_key(self, value): """ Set the partition key of the event data object. + :param value: The partition key to set. :type value: str or bytes """ @@ -599,7 +621,8 @@ def partition_key(self, value): def properties(self): """ Application defined properties on the message. - :returns: dict + + :rtype: dict """ return self._properties @@ -607,6 +630,7 @@ def properties(self): def properties(self, value): """ Application defined properties on the message. + :param value: The application properties for the EventData. :type value: dict """ @@ -618,7 +642,8 @@ def properties(self, value): def body(self): """ The body of the event data object. - :returns: bytes or generator[bytes] + + :rtype: bytes or generator[bytes] """ return self.message.get_data() @@ -643,6 +668,7 @@ class Offset(object): def __init__(self, value, inclusive=False): """ Initialize Offset. + :param value: The offset value. :type value: ~datetime.datetime or int or str :param inclusive: Whether to include the supplied value as the start point. @@ -654,7 +680,8 @@ def __init__(self, value, inclusive=False): def selector(self): """ Creates a selector expression of the offset. 
- :returns: bytes + + :rtype: bytes """ operator = ">=" if self.inclusive else ">" if isinstance(self.value, datetime.datetime): diff --git a/azure/eventhub/async/__init__.py b/azure/eventhub/async/__init__.py index ba75500..e2e80d5 100644 --- a/azure/eventhub/async/__init__.py +++ b/azure/eventhub/async/__init__.py @@ -34,7 +34,7 @@ class EventHubClientAsync(EventHubClient): def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self-use """ - Create an ~uamqp.authentication.SASTokenAuthAsync instance to authenticate + Create an ~uamqp.async.authentication_async.SASTokenAuthAsync instance to authenticate the session. :param auth_uri: The URI to authenticate against. @@ -48,7 +48,7 @@ def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self def _create_connection_async(self): """ - Create a new ~uamqp.ConnectionAsync instance that will be shared between all + Create a new ~uamqp.async.connection_async.ConnectionAsync instance that will be shared between all AsyncSender/AsyncReceiver clients. """ if not self.connection: @@ -81,7 +81,7 @@ async def run_async(self): Run the EventHubClient asynchronously. Opens the connection and starts running all AsyncSender/AsyncReceiver clients. - :returns: ~azure.eventhub.EventHubClientAsync + :rtype: ~azure.eventhub.async.EventHubClientAsync """ log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) self._create_connection_async() @@ -101,7 +101,8 @@ async def stop_async(self): async def get_eventhub_info_async(self): """ Get details on the specified EventHub async. - :returns: dict + + :rtype: dict """ eh_name = self.address.path.lstrip('/') target = "amqps://{}/{}".format(self.address.hostname, eh_name) @@ -126,6 +127,7 @@ async def get_eventhub_info_async(self): def add_async_receiver(self, consumer_group, partition, offset=None, prefetch=300, loop=None): """ Add an async receiver to the client for a particular consumer group and partition. 
+ :param consumer_group: The name of the consumer group. :type consumer_group: str :param partition: The ID of the partition. @@ -134,7 +136,7 @@ def add_async_receiver(self, consumer_group, partition, offset=None, prefetch=30 :type offset: ~azure.eventhub.Offset :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int - :returns: ~azure.eventhub.ReceiverAsync + :rtype: ~azure.eventhub.async.ReceiverAsync """ source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, self.address.path, consumer_group, partition) @@ -151,6 +153,7 @@ def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=30 can connect to a partition at any given time - additional epoch receivers must have a higher epoch value or they will be rejected. If a 2nd epoch receiver has connected, the first will be closed. + :param consumer_group: The name of the consumer group. :type consumer_group: str :param partition: The ID of the partition. @@ -159,7 +162,7 @@ def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=30 :type epoch: int :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int - :returns: ~azure.eventhub.ReceiverAsync + :rtype: ~azure.eventhub.async.ReceiverAsync """ source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, self.address.path, consumer_group, partition) @@ -171,11 +174,12 @@ def add_async_sender(self, partition=None, loop=None): """ Add an async sender to the client to send ~azure.eventhub.EventData object to an EventHub. + :param partition: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via - round-robin + round-robin. 
:type partition: str - :returns: ~azure.eventhub.SenderAsync + :rtype: ~azure.eventhub.async.SenderAsync """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) handler = AsyncSender(self, target, partition=partition, loop=loop) @@ -190,8 +194,9 @@ class AsyncSender(Sender): def __init__(self, client, target, partition=None, loop=None): # pylint: disable=super-init-not-called """ Instantiate an EventHub event SenderAsync client. - :param client: The parent EventHubClient. - :type client: ~azure.eventhub.EventHubClient. + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub.async.EventHubClientAsync :param target: The URI of the EventHub to send to. :type target: str :param loop: An event loop. @@ -213,6 +218,7 @@ async def send(self, event_data): """ Sends an event data and asynchronously waits until acknowledgement is received or operation times out. + :param event_data: The event to be sent. :type event_data: ~azure.eventhub.EventData :raises: ~azure.eventhub.EventHubError if the message fails to @@ -237,10 +243,11 @@ class AsyncReceiver(Receiver): def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pylint: disable=super-init-not-called """ Instantiate an async receiver. - :param client: The parent EventHubClient. - :type client: ~azure.eventhub.EventHubClient + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub.async.EventHubClientAsync :param source: The source EventHub from which to receive events. - :type source: ~uamqp.Source + :type source: ~uamqp.address.Source :param prefetch: The number of events to prefetch from the service for processing. Default is 300. :type prefetch: int @@ -268,6 +275,7 @@ def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pyli async def receive(self, max_batch_size=None, callback=None, timeout=None): """ Receive events asynchronously from the EventHub. + :param max_batch_size: Receive a batch of events. 
Batch size will be up to the maximum specified, but will return as soon as service returns no new events. If combined with a timeout and no events are @@ -278,7 +286,7 @@ async def receive(self, max_batch_size=None, callback=None, timeout=None): be a function that accepts a single argument - the event data. This callback will be run before the message is returned in the result generator. :type callback: func[~azure.eventhub.EventData] - :returns: list[~azure.eventhub.EventData] + :rtype: list[~azure.eventhub.EventData] """ try: self._callback = callback diff --git a/azure/eventprocessorhost/abstract_checkpoint_manager.py b/azure/eventprocessorhost/abstract_checkpoint_manager.py index 081ebd4..5e6ec84 100644 --- a/azure/eventprocessorhost/abstract_checkpoint_manager.py +++ b/azure/eventprocessorhost/abstract_checkpoint_manager.py @@ -20,8 +20,10 @@ def __init__(self): async def create_checkpoint_store_if_not_exists_async(self): """ Create the checkpoint store if it doesn't exist. Do nothing if it does exist. - :returns: `True` if the checkpoint store already exists or was created OK, `False` + + :return: `True` if the checkpoint store already exists or was created OK, `False` if there was a failure. + :rtype: bool """ pass @@ -30,9 +32,11 @@ async def get_checkpoint_async(self, partition_id): """ Get the checkpoint data associated with the given partition. Could return null if no checkpoint has been created for that partition. + :param partition_id: The ID of a given parition. :type partition_id: str - :returns: Given partition checkpoint info, or `None` if none has been previously stored. + :return: Given partition checkpoint info, or `None` if none has been previously stored. + :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint """ pass @@ -41,20 +45,23 @@ async def create_checkpoint_if_not_exists_async(self, partition_id): """ Create the given partition checkpoint if it doesn't exist.Do nothing if it does exist. 
The offset/sequenceNumber for a freshly-created checkpoint should be set to StartOfStream/0. + :param partition_id: The ID of a given parition. :type partition_id: str - :returns: The checkpoint for the given partition, whether newly created or already existing. + :return: The checkpoint for the given partition, whether newly created or already existing. + :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint """ pass @abstractmethod async def update_checkpoint_async(self, lease, checkpoint): """ - Update the checkpoint in the store with the offset/sequenceNumber in the provided checkpoint + Update the checkpoint in the store with the offset/sequenceNumber in the provided checkpoint. + :param lease: The lease to be updated. - :type lease: ~azure.eventprocessorhost.Lease + :type lease: ~azure.eventprocessorhost.lease.Lease :param checkpoint: offset/sequeceNumber to update the store with. - :type checkpoint: ~azure.eventprocessorhost.Checkpoint + :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint """ pass @@ -63,6 +70,7 @@ async def delete_checkpoint_async(self, partition_id): """ Delete the stored checkpoint for the given partition. If there is no stored checkpoint for the given partition, that is treated as success. + :param partition_id: The ID of a given parition. :type partition_id: str """ diff --git a/azure/eventprocessorhost/abstract_event_processor.py b/azure/eventprocessorhost/abstract_event_processor.py index fa4020b..6ff9dd4 100644 --- a/azure/eventprocessorhost/abstract_event_processor.py +++ b/azure/eventprocessorhost/abstract_event_processor.py @@ -16,8 +16,9 @@ def __init__(self, params=None): async def open_async(self, context): """ Called by processor host to initialize the event processor. 
+ :param context: Information about the partition - :type context: ~azure.eventprocessorhost.PartitionContext + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext """ pass @@ -25,8 +26,9 @@ async def open_async(self, context): async def close_async(self, context, reason): """ Called by processor host to indicate that the event processor is being stopped. + :param context: Information about the partition - :type context: ~azure.eventprocessorhost.PartitionContext + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext :param reason: The reason for closing. :type reason: str """ @@ -37,10 +39,11 @@ async def process_events_async(self, context, messages): """ Called by the processor host when a batch of events has arrived. This is where the real work of the event processor is done. + :param context: Information about the partition - :type context: ~azure.eventprocessorhost.PartitionContext + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext :param messages: The events to be processed. - :type messages: list of ~azure.eventhub.EventData + :type messages: list[~azure.eventhub.EventData] """ pass @@ -49,9 +52,10 @@ async def process_error_async(self, context, error): """ Called when the underlying client experiences an error while receiving. EventProcessorHost will take care of recovering from the error and - continuing to pump messages,so no action is required from + continuing to pump messages. + :param context: Information about the partition - :type context: ~azure.eventprocessorhost.PartitionContext + :type context: ~azure.eventprocessorhost.partition_context.PartitionContext :param error: The error that occured. 
""" pass diff --git a/azure/eventprocessorhost/abstract_lease_manager.py b/azure/eventprocessorhost/abstract_lease_manager.py index f9102e6..8638e49 100644 --- a/azure/eventprocessorhost/abstract_lease_manager.py +++ b/azure/eventprocessorhost/abstract_lease_manager.py @@ -25,7 +25,9 @@ def __init__(self, lease_renew_interval, lease_duration): async def create_lease_store_if_not_exists_async(self): """ Create the lease store if it does not exist, do nothing if it does exist. - :returns: `True` if the lease store already exists or was created successfully, `False` if not. + + :return: `True` if the lease store already exists or was created successfully, `False` if not. + :rtype: bool """ pass @@ -33,7 +35,9 @@ async def create_lease_store_if_not_exists_async(self): async def delete_lease_store_async(self): """ Not used by EventProcessorHost, but a convenient function to have for testing. - :returns: `True` if the lease store was deleted successfully, `False` if not. + + :return: `True` if the lease store was deleted successfully, `False` if not. + :rtype: bool """ pass @@ -41,9 +45,11 @@ async def get_lease_async(self, partition_id): """ Return the lease info for the specified partition. Can return null if no lease has been created in the store for the specified partition. + :param partition_id: The ID of a given partition. :type parition_id: str - :returns: lease info for the partition, or `None`. + :return: Lease info for the partition, or `None`. + :rtype: """ pass @@ -52,7 +58,9 @@ def get_all_leases(self): """ Return the lease info for all partitions. A typical implementation could just call get_lease_async() on all partitions. - :returns: list of lease info. + + :return: A list of lease info. + :rtype: """ pass @@ -61,9 +69,10 @@ async def create_lease_if_not_exists_async(self, partition_id): """ Create in the store the lease info for the given partition, if it does not exist. Do nothing if it does exist in the store already. 
+ :param partition_id: The ID of a given partition. :type parition_id: str - :returns: The existing or newly-created lease info for the partition. + :return: The existing or newly-created lease info for the partition. """ pass @@ -72,8 +81,9 @@ async def delete_lease_async(self, lease): """ Delete the lease info for the given partition from the store. If there is no stored lease for the given partition, that is treated as success. + :param lease: The lease to be deleted. - :type lease: ~azure.eventprocessorhost.Lease + :type lease: ~azure.eventprocessorhost.lease.Lease """ pass @@ -83,9 +93,11 @@ async def acquire_lease_async(self, lease): Acquire the lease on the desired partition for this EventProcessorHost. Note that it is legal to acquire a lease that is already owned by another host. Lease-stealing is how partitions are redistributed when additional hosts are started. + :param lease: The lease to be acquired. - :type lease: ~azure.eventprocessorhost.Lease - :returns: `True` if the lease was acquired successfully, `False` if not. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was acquired successfully, `False` if not. + :rtype: bool """ pass @@ -95,9 +107,11 @@ async def renew_lease_async(self, lease): Renew a lease currently held by this host. If the lease has been stolen, or expired, or released, it is not possible to renew it. You will have to call get_lease_async() and then acquire_lease_async() again. + :param lease: The lease to be renewed. - :type lease: ~azure.eventprocessorhost.Lease - :returns: `True` if the lease was renewed successfully, `False` if not. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was renewed successfully, `False` if not. + :rtype: bool """ pass @@ -106,9 +120,11 @@ async def release_lease_async(self, lease): """ Give up a lease currently held by this host. If the lease has been stolen, or expired, releasing it is unnecessary, and will fail if attempted. 
+ :param lease: The lease to be released. - :type lease: ~azure.eventprocessorhost.Lease - :returns: `True` if the lease was released successfully, `False` if not. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was released successfully, `False` if not. + :rtype: bool """ pass @@ -119,9 +135,10 @@ async def update_lease_async(self, lease): hold a lease in order to update it. If the lease has been stolen, or expired, or released, it cannot be updated. Updating should renew the lease before performing the update to avoid lease expiration during the process. + :param lease: The lease to be updated. - :type lease: ~azure.eventprocessorhost.Lease - :returns: `True` if the updated was performed successfully, `False` if not. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the updated was performed successfully, `False` if not. + :rtype: bool """ pass - \ No newline at end of file diff --git a/azure/eventprocessorhost/azure_blob_lease.py b/azure/eventprocessorhost/azure_blob_lease.py index 6c01760..04ec135 100644 --- a/azure/eventprocessorhost/azure_blob_lease.py +++ b/azure/eventprocessorhost/azure_blob_lease.py @@ -16,7 +16,7 @@ class AzureBlobLease(Lease): def __init__(self): """ - Init Azure Blob Lease + Init Azure Blob Lease. """ super() Lease.__init__(self) @@ -25,7 +25,7 @@ def __init__(self): def serializable(self): """ - Returns Serialiazble instance of __dict__ + Returns Serialiazble instance of `__dict__`. """ serial = self.__dict__.copy() del serial['state'] @@ -33,13 +33,13 @@ def serializable(self): def with_lease(self, lease): """ - Init with exisiting lease + Init with exisiting lease. """ super().with_source(lease) def with_blob(self, blob): """ - Init Azure Blob Lease with existing blob + Init Azure Blob Lease with existing blob. 
""" content = json.loads(blob.content) self.partition_id = content["partition_id"] @@ -51,7 +51,7 @@ def with_blob(self, blob): def with_source(self, lease): """ - Init Azure Blob Lease from existing + Init Azure Blob Lease from existing. """ super().with_source(lease) self.offset = lease.offset @@ -59,7 +59,7 @@ def with_source(self, lease): async def is_expired(self): """ - Check and return azure blob lease state using storage api + Check and return Azure Blob Lease state using Storage API. """ if asyncio.iscoroutinefunction(self.state): current_state = await self.state() diff --git a/azure/eventprocessorhost/azure_storage_checkpoint_manager.py b/azure/eventprocessorhost/azure_storage_checkpoint_manager.py index 261ea43..a631fdb 100644 --- a/azure/eventprocessorhost/azure_storage_checkpoint_manager.py +++ b/azure/eventprocessorhost/azure_storage_checkpoint_manager.py @@ -76,8 +76,10 @@ def initialize(self, host): async def create_checkpoint_store_if_not_exists_async(self): """ Create the checkpoint store if it doesn't exist. Do nothing if it does exist. - :returns: `True` if the checkpoint store already exists or was created OK, `False` - if there was a failure + + :return: `True` if the checkpoint store already exists or was created OK, `False` + if there was a failure. + :rtype: bool """ await self.create_lease_store_if_not_exists_async() @@ -85,7 +87,11 @@ async def get_checkpoint_async(self, partition_id): """ Get the checkpoint data associated with the given partition. Could return null if no checkpoint has been created for that partition. - :returns: Given partition checkpoint info, or `None` if none has been previously stored. + + :param partition_id: The partition ID. + :type partition_id: str + :return: Given partition checkpoint info, or `None` if none has been previously stored. 
+ :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint """ lease = await self.get_lease_async(partition_id) checkpoint = None @@ -99,7 +105,11 @@ async def create_checkpoint_if_not_exists_async(self, partition_id): """ Create the given partition checkpoint if it doesn't exist.Do nothing if it does exist. The offset/sequenceNumber for a freshly-created checkpoint should be set to StartOfStream/0. - :returns: The checkpoint for the given partition, whether newly created or already existing. + + :param partition_id: The partition ID. + :type partition_id: str + :return: The checkpoint for the given partition, whether newly created or already existing. + :rtype: ~azure.eventprocessorhost.checkpoint.Checkpoint """ checkpoint = await self.get_checkpoint_async(partition_id) if not checkpoint: @@ -111,6 +121,11 @@ async def update_checkpoint_async(self, lease, checkpoint): """ Update the checkpoint in the store with the offset/sequenceNumber in the provided checkpoint checkpoint:offset/sequeceNumber to update the store with. + + :param lease: The stored lease to be updated. + :type lease: ~azure.eventprocessorhost.lease.Lease + :param checkpoint: The checkpoint to update the lease with. + :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint """ new_lease = AzureBlobLease() new_lease.with_source(lease) @@ -122,6 +137,9 @@ async def delete_checkpoint_async(self, partition_id): """ Delete the stored checkpoint for the given partition. If there is no stored checkpoint for the given partition, that is treated as success. + + :param partition_id: The partition ID. + :type partition_id: str """ return # Make this a no-op to avoid deleting leases by accident. @@ -130,7 +148,9 @@ async def delete_checkpoint_async(self, partition_id): async def create_lease_store_if_not_exists_async(self): """ Create the lease store if it does not exist, do nothing if it does exist. - :returns: `True` if the lease store already exists or was created successfully, `False` if not. 
+ + :return: `True` if the lease store already exists or was created successfully, `False` if not. + :rtype: bool """ try: await self.host.loop.run_in_executor( @@ -148,7 +168,9 @@ async def create_lease_store_if_not_exists_async(self): async def delete_lease_store_async(self): """ Not used by EventProcessorHost, but a convenient function to have for testing. - :returns: `True` if the lease store was deleted successfully, `False` if not. + + :return: `True` if the lease store was deleted successfully, `False` if not. + :rtype: bool """ return "Not Supported in Python" @@ -156,7 +178,11 @@ async def get_lease_async(self, partition_id): """ Return the lease info for the specified partition. Can return null if no lease has been created in the store for the specified partition. - :returns: lease info for the partition, or `None`. + + :param partition_id: The partition ID. + :type partition_id: str + :return: lease info for the partition, or `None`. + :rtype: ~azure.eventprocessorhost.lease.Lease """ try: blob = await self.host.loop.run_in_executor( @@ -191,7 +217,9 @@ async def get_all_leases(self): """ Return the lease info for all partitions. A typical implementation could just call get_lease_async() on all partitions. - :returns: list of lease info. + + :return: A list of lease info. + :rtype: list[~azure.eventprocessorhost.lease.Lease] """ lease_futures = [] partition_ids = await self.host.partition_manager.get_partition_ids_async() @@ -203,9 +231,11 @@ async def create_lease_if_not_exists_async(self, partition_id): """ Create in the store the lease info for the given partition, if it does not exist. Do nothing if it does exist in the store already. + :param partition_id: The ID of a given parition. :type partition_id: str - :returns: the existing or newly-created lease info for the partition. + :return: the existing or newly-created lease info for the partition. 
+ :rtype: ~azure.eventprocessorhost.lease.Lease """ return_lease = None try: @@ -235,6 +265,9 @@ async def delete_lease_async(self, lease): """ Delete the lease info for the given partition from the store. If there is no stored lease for the given partition, that is treated as success. + + :param lease: The stored lease to be deleted. + :type lease: ~azure.eventprocessorhost.lease.Lease """ await self.host.loop.run_in_executor( self.executor, @@ -249,7 +282,11 @@ async def acquire_lease_async(self, lease): Acquire the lease on the desired partition for this EventProcessorHost. Note that it is legal to acquire a lease that is already owned by another host. Lease-stealing is how partitions are redistributed when additional hosts are started. - :returns: `True` if the lease was acquired successfully, `False` if not. + + :param lease: The stored lease to be acquired. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was acquired successfully, `False` if not. + :rtype: bool """ retval = True new_lease_id = str(uuid.uuid4()) @@ -307,7 +344,11 @@ async def renew_lease_async(self, lease): Renew a lease currently held by this host. If the lease has been stolen, or expired, or released, it is not possible to renew it. You will have to call getLease() and then acquireLease() again. - :returns: `True` if the lease was renewed successfully, `False` if not. + + :param lease: The stored lease to be renewed. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was renewed successfully, `False` if not. + :rtype: bool """ try: await self.host.loop.run_in_executor( @@ -331,7 +372,11 @@ async def release_lease_async(self, lease): """ Give up a lease currently held by this host. If the lease has been stolen, or expired, releasing it is unnecessary, and will fail if attempted. - :returns: `True` if the lease was released successfully, `False` if not. + + :param lease: The stored lease to be released. 
+ :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the lease was released successfully, `False` if not. + :rtype: bool """ lease_id = None try: @@ -369,7 +414,11 @@ async def update_lease_async(self, lease): hold a lease in order to update it. If the lease has been stolen, or expired, or released, it cannot be updated. Updating should renew the lease before performing the update to avoid lease expiration during the process. - :returns: `True` if the updated was performed successfully, `False` if not. + + :param lease: The stored lease to be updated. + :type lease: ~azure.eventprocessorhost.lease.Lease + :return: `True` if the updated was performed successfully, `False` if not. + :rtype: bool """ if lease is None: return False diff --git a/azure/eventprocessorhost/cancellation_token.py b/azure/eventprocessorhost/cancellation_token.py index 0764af6..ae1aeae 100644 --- a/azure/eventprocessorhost/cancellation_token.py +++ b/azure/eventprocessorhost/cancellation_token.py @@ -8,13 +8,13 @@ """ class CancellationToken: """ - Thread Safe Mutable Cancellation Token + Thread Safe Mutable Cancellation Token. """ def __init__(self): self.is_cancelled = False def cancel(self): """ - Cancel the token + Cancel the token. """ self.is_cancelled = True diff --git a/azure/eventprocessorhost/checkpoint.py b/azure/eventprocessorhost/checkpoint.py index 48b03f1..ff09052 100644 --- a/azure/eventprocessorhost/checkpoint.py +++ b/azure/eventprocessorhost/checkpoint.py @@ -5,11 +5,12 @@ class Checkpoint: """ - Contains checkpoint metadata + Contains checkpoint metadata. """ def __init__(self, partition_id, offset="-1", sequence_number="0"): - """Initialize Checkpoint + """Initialize Checkpoint. + :param partition_id: The parition ID of the checkpoint. :type partition_id: str :param offset: The receive offset of the checkpoint. 
@@ -24,8 +25,9 @@ def __init__(self, partition_id, offset="-1", sequence_number="0"): def from_source(self, checkpoint): """ Creates a new Checkpoint from an existing checkpoint. + :param checkpoint: Existing checkpoint. - :type checkpoint: ~azure.eventprocessorhost.Checkpoint + :type checkpoint: ~azure.eventprocessorhost.checkpoint.Checkpoint """ self.partition_id = checkpoint.partition_id self.offset = checkpoint.offset diff --git a/azure/eventprocessorhost/eh_config.py b/azure/eventprocessorhost/eh_config.py index b977416..73f89a8 100644 --- a/azure/eventprocessorhost/eh_config.py +++ b/azure/eventprocessorhost/eh_config.py @@ -12,6 +12,7 @@ class EventHubConfig: """ A container class for Event Hub properties. + :param sb_name: The EventHub (ServiceBus) namespace. :type sb_name: str :param eh_name: The EventHub name. @@ -43,6 +44,8 @@ def get_client_address(self): """ Returns an auth token dictionary for making calls to eventhub REST API. + + :rtype: str """ return "amqps://{}:{}@{}.{}:5671/{}".format( urllib.parse.quote_plus(self.policy), @@ -54,6 +57,8 @@ def get_client_address(self): def get_rest_token(self): """ Returns an auth token for making calls to eventhub REST API. + + :rtype: str """ uri = urllib.parse.quote_plus( "https://{}.{}/{}".format(self.sb_name, self.namespace_suffix, self.eh_name)) diff --git a/azure/eventprocessorhost/eh_partition_pump.py b/azure/eventprocessorhost/eh_partition_pump.py index 87db09d..738422d 100644 --- a/azure/eventprocessorhost/eh_partition_pump.py +++ b/azure/eventprocessorhost/eh_partition_pump.py @@ -15,7 +15,7 @@ class EventHubPartitionPump(PartitionPump): """ - Pulls and messages from lease partition from eventhub and sends them to processor + Pulls and messages from lease partition from eventhub and sends them to processor. 
""" def __init__(self, host, lease): @@ -27,7 +27,7 @@ def __init__(self, host, lease): async def on_open_async(self): """ - Eventhub Override for on_open_async + Eventhub Override for on_open_async. """ _opened_ok = False _retry_count = 0 @@ -61,7 +61,7 @@ async def on_open_async(self): async def open_clients_async(self): """ Responsible for establishing connection to event hub client - throws EventHubsException, IOException, InterruptedException, ExecutionException + throws EventHubsException, IOException, InterruptedException, ExecutionException. """ await self.partition_context.get_initial_offset_async() # Create event hub client and receive handler and set options @@ -78,7 +78,7 @@ async def open_clients_async(self): async def clean_up_clients_async(self): """ - Resets the pump swallows all exceptions + Resets the pump swallows all exceptions. """ if self.partition_receiver: if self.eh_client: @@ -89,7 +89,8 @@ async def clean_up_clients_async(self): async def on_closing_async(self, reason): """ - Overides partition pump on cleasing + Overides partition pump on cleasing. + :param reason: The reason for the shutdown. :type reason: str """ @@ -100,7 +101,7 @@ async def on_closing_async(self, reason): class PartitionReceiver: """ - Recieves events from a async until lease is lost + Recieves events asynchronously until lease is lost. """ def __init__(self, eh_partition_pump): @@ -110,7 +111,7 @@ def __init__(self, eh_partition_pump): async def run(self): """ - Runs the async partion reciever event loop to retrive messages from the event queue + Runs the async partion reciever event loop to retrive messages from the event queue. """ # Implement pull max batch from queue instead of one message at a time while self.eh_partition_pump.pump_status != "Errored" and not self.eh_partition_pump.is_closing(): @@ -135,10 +136,11 @@ async def process_events_async(self, events): """ This method is called on the thread that the EH client uses to run the pump. 
There is one pump per EventHubClient. Since each PartitionPump creates a - new EventHubClient,using that thread to call OnEvents does no harm. Even if OnEvents + new EventHubClient, using that thread to call OnEvents does no harm. Even if OnEvents is slow, the pump will get control back each time OnEvents returns, and be able to receive a new batch of messages with which to make the next OnEvents call.The pump gains nothing by running faster than OnEvents. + :param events: List of events to be processed. :type events: list of ~azure.eventhub.EventData """ @@ -147,7 +149,8 @@ async def process_events_async(self, events): async def process_error_async(self, error): """ Handles processing errors this is never called since python recieve client doesn't - have error handling implemented (TBD add fault pump handling) + have error handling implemented (TBD add fault pump handling). + :param error: An error the occurred. :type error: Exception """ diff --git a/azure/eventprocessorhost/eph.py b/azure/eventprocessorhost/eph.py index 778a047..c344628 100644 --- a/azure/eventprocessorhost/eph.py +++ b/azure/eventprocessorhost/eph.py @@ -11,25 +11,26 @@ class EventProcessorHost: """ Represents a host for processing Event Hubs event data at scale. - Takes in event hub a event processor class definition a eh_config object - As well as a storage manager and an optional event_processor params (ep_params) + Takes in an event hub, a event processor class definition, a config object, + as well as a storage manager and optional event processor params (ep_params). """ def __init__(self, event_processor, eh_config, storage_manager, ep_params=None, eph_options=None, loop=None): """ Initialize EventProcessorHost. + :param event_processor: The event processing handler. - :type event_processor: ~azure.eventprocessorhost.AbstractEventProcessor + :type event_processor: ~azure.eventprocessorhost.abstract_event_processor.AbstractEventProcessor :param eh_config: The EPH connection configuration. 
- :type eh_config: ~azure.eventprocessorhost.EventHubConfig + :type eh_config: ~azure.eventprocessorhost.eh_config.EventHubConfig :param storage_manager: The Azure storage manager for persisting lease and checkpoint information. - :type storage_manager: ~azure.eventprocessorhost.AzureStorageCheckpointLeaseManager + :type storage_manager: ~azure.eventprocessorhost.azure_storage_checkpoint_manager.AzureStorageCheckpointLeaseManager :param ep_params: Optional arbitrary parameters to be passed into the event_processor on initialization. :type ep_params: list :param eph_options: EPH configuration options. - :type eph_options: ~azure.eventprocessorhost.EPHOptions + :type eph_options: ~azure.eventprocessorhost.eph.EPHOptions :param loop: An eventloop. If not provided the default asyncio event loop will be used. """ self.event_processor = event_processor @@ -46,7 +47,7 @@ def __init__(self, event_processor, eh_config, storage_manager, ep_params=None, async def open_async(self): """ - Starts the host + Starts the host. """ if not self.loop: self.loop = asyncio.get_event_loop() @@ -54,14 +55,14 @@ async def open_async(self): async def close_async(self): """ - Stops the host + Stops the host. """ await self.partition_manager.stop_async() class EPHOptions: """ - Class that contains default and overidable EPH option + Class that contains default and overidable EPH option. """ def __init__(self): diff --git a/azure/eventprocessorhost/lease.py b/azure/eventprocessorhost/lease.py index 6c45371..6d4e0b1 100644 --- a/azure/eventprocessorhost/lease.py +++ b/azure/eventprocessorhost/lease.py @@ -18,7 +18,8 @@ def __init__(self): def with_partition_id(self, partition_id): """ - Init with partition Id + Init with partition Id. + :param partition_id: ID of a given partition. :type partition_id: str """ @@ -30,8 +31,9 @@ def with_partition_id(self, partition_id): def with_source(self, lease): """ Init with existing lease. + :param lease: An existing Lease. 
- :type lease: ~azure.eventprocessorhost.Lease + :type lease: ~azure.eventprocessorhost.lease.Lease """ self.partition_id = lease.partition_id self.epoch = lease.epoch @@ -42,13 +44,14 @@ async def is_expired(self): """ Determines whether the lease is expired. By default lease never expires. Deriving class implements the lease expiry logic. - :returns: bool + + :rtype: bool """ return False def increment_epoch(self): """ - Increment lease epoch + Increment lease epoch. """ self.epoch += 1 return self.epoch diff --git a/azure/eventprocessorhost/partition_context.py b/azure/eventprocessorhost/partition_context.py index d4cfa19..fb619b3 100644 --- a/azure/eventprocessorhost/partition_context.py +++ b/azure/eventprocessorhost/partition_context.py @@ -12,7 +12,7 @@ class PartitionContext: """ - Encapsulates information related to an Event Hubs partition used by AbstractEventProcessor + Encapsulates information related to an Event Hubs partition used by AbstractEventProcessor. """ def __init__(self, host, partition_id, eh_path, consumer_group_name, pump_loop=None): @@ -28,6 +28,7 @@ def __init__(self, host, partition_id, eh_path, consumer_group_name, pump_loop=N def set_offset_and_sequence_number(self, event_data): """ Updates offset based on event. + :param event_data: A received EventData with valid offset and sequenceNumber. :type event_data: ~azure.eventhub.EventData """ @@ -39,7 +40,8 @@ def set_offset_and_sequence_number(self, event_data): async def get_initial_offset_async(self): # throws InterruptedException, ExecutionException """ Gets the initial offset for processing the partition. 
- :returns: str + + :rtype: str """ _logger.info("Calling user-provided initial offset provider {} {}".format( self.host.guid, self.partition_id)) @@ -60,7 +62,7 @@ async def get_initial_offset_async(self): # throws InterruptedException, Executi async def checkpoint_async(self): """ Generates a checkpoint for the partition using the curren offset and sequenceNumber for - and persists to the checkpoint manager + and persists to the checkpoint manager. """ captured_checkpoint = Checkpoint(self.partition_id, self.offset, self.sequence_number) await self.persist_checkpoint_async(captured_checkpoint) @@ -69,9 +71,10 @@ async def checkpoint_async_event_data(self, event_data): """ Stores the offset and sequenceNumber from the provided received EventData instance, then writes those values to the checkpoint store via the checkpoint manager. + :param event_data: A received EventData with valid offset and sequenceNumber. :type event_data: ~azure.eventhub.EventData - :raises: ValueError if suplied event_data is None + :raises: ValueError if suplied event_data is None. :raises: ValueError if the sequenceNumber is less than the last checkpointed value. """ if not event_data: @@ -89,7 +92,7 @@ def to_string(self): Returns the parition context in the following format: "PartitionContext({EventHubPath}{ConsumerGroupName}{PartitionId}{SequenceNumber})" - :returns: str + :rtype: str """ return "PartitionContext({}{}{}{})".format(self.eh_path, self.consumer_group_name, diff --git a/azure/eventprocessorhost/partition_manager.py b/azure/eventprocessorhost/partition_manager.py index 8c1e53a..2580abc 100644 --- a/azure/eventprocessorhost/partition_manager.py +++ b/azure/eventprocessorhost/partition_manager.py @@ -18,7 +18,7 @@ class PartitionManager: """ - Manages the partition event pump execution + Manages the partition event pump execution. 
""" def __init__(self, host): @@ -30,7 +30,9 @@ def __init__(self, host): async def get_partition_ids_async(self): """ - Returns a list of all the event hub partition ids + Returns a list of all the event hub partition IDs. + + :rtype: list[str] """ if not self.partition_ids: eh_client = EventHubClientAsync( @@ -65,7 +67,7 @@ async def stop_async(self): async def run_async(self): """ - Starts the run loop and manages exceptions and cleanup + Starts the run loop and manages exceptions and cleanup. """ try: await self.run_loop_async() @@ -82,7 +84,10 @@ async def initialize_stores_async(self): """ Intializes the partition checkpoint and lease store ensures that a checkpoint exists for all partitions. Note in this case checkpoint and lease stores are - the same storage manager construct. Returns the number of partitions + the same storage manager construct. + + :return: Returns the number of partitions. + :rtype: int """ await self.host.storage_manager.create_checkpoint_store_if_not_exists_async() partition_ids = await self.get_partition_ids_async() @@ -102,7 +107,7 @@ async def initialize_stores_async(self): def retry(self, func, partition_id, retry_message, final_failure_message, max_retries, host_id): """ - Make attempt_renew_lease async call sync + Make attempt_renew_lease async call sync. """ loop = asyncio.new_event_loop() loop.run_until_complete(self.retry_async(func, partition_id, retry_message, @@ -111,7 +116,7 @@ def retry(self, func, partition_id, retry_message, final_failure_message, max_re async def retry_async(self, func, partition_id, retry_message, final_failure_message, max_retries, host_id): """ - Throws if it runs out of retries. If it returns, action succeeded + Throws if it runs out of retries. If it returns, action succeeded. 
""" created_okay = False retry_count = 0 @@ -127,7 +132,7 @@ async def retry_async(self, func, partition_id, retry_message, async def run_loop_async(self): """ - This is the main execution loop for allocating and manging pumps + This is the main execution loop for allocating and manging pumps. """ while not self.cancellation_token.is_cancelled: lease_manager = self.host.storage_manager @@ -189,7 +194,12 @@ async def run_loop_async(self): async def check_and_add_pump_async(self, partition_id, lease): """ - Updates the lease on an exisiting pump + Updates the lease on an exisiting pump. + + :param partition_id: The partition ID. + :type partition_id: str + :param lease: The lease to be used. + :type lease: ~azure.eventprocessorhost.lease.Lease """ if partition_id in self.partition_pumps: # There already is a pump. Make sure the pump is working and replace the lease. @@ -208,7 +218,12 @@ async def check_and_add_pump_async(self, partition_id, lease): async def create_new_pump_async(self, partition_id, lease): """ - Create a new pump thread with a given lease + Create a new pump thread with a given lease. + + :param partition_id: The partition ID. + :type partition_id: str + :param lease: The lease to be used. + :type lease: ~azure.eventprocessorhost.lease.Lease """ loop = asyncio.get_event_loop() partition_pump = EventHubPartitionPump(self.host, lease) @@ -219,7 +234,12 @@ async def create_new_pump_async(self, partition_id, lease): async def remove_pump_async(self, partition_id, reason): """ - Stops a single partiton pump + Stops a single partiton pump. + + :param partition_id: The partition ID. + :type partition_id: str + :param reason: A reason for closing. 
+ :type reason: str """ if partition_id in self.partition_pumps: captured_pump = self.partition_pumps[partition_id] @@ -239,7 +259,11 @@ async def remove_pump_async(self, partition_id, reason): async def remove_all_pumps_async(self, reason): """ Stops all partition pumps - (Note this might be wrong and need to await all tasks before returning done) + (Note this might be wrong and need to await all tasks before returning done). + + :param reason: A reason for closing. + :type reason: str + :rtype: bool """ pump_tasks = [self.remove_pump_async(p_id, reason) for p_id in self.partition_pumps] await asyncio.gather(*pump_tasks) @@ -267,6 +291,12 @@ def which_lease_to_steal(self, stealable_leases, have_lease_count): biggest and this host by two at a time. If the starting difference is two or greater, then the difference cannot end up below 0. This host may become tied for biggest, but it cannot become larger than the host that it is stealing from. + + :param stealable_leases: List of leases to determine which can be stolen. + :type stealable_leases: list[~azure.eventprocessorhost.lease.Lease] + :param have_lease_count: Lease count. + :type have_lease_count: int + :rtype: ~azure.eventprocessorhost.lease.Lease """ counts_by_owner = self.count_leases_by_owner(stealable_leases) biggest_owner = (sorted(counts_by_owner.items(), key=lambda kv: kv[1])).pop() @@ -278,14 +308,14 @@ def which_lease_to_steal(self, stealable_leases, have_lease_count): def count_leases_by_owner(self, leases): # pylint: disable=no-self-use """ - Returns a dictionary of leases by current owner + Returns a dictionary of leases by current owner. """ owners = [l.owner for l in leases] return dict(Counter(owners)) def attempt_renew_lease(self, lease_task, owned_by_others_q, lease_manager): """ - Make attempt_renew_lease async call sync + Make attempt_renew_lease async call sync. 
""" loop = asyncio.new_event_loop() loop.run_until_complete(self.attempt_renew_lease_async(lease_task, owned_by_others_q, lease_manager)) @@ -293,7 +323,7 @@ def attempt_renew_lease(self, lease_task, owned_by_others_q, lease_manager): async def attempt_renew_lease_async(self, lease_task, owned_by_others_q, lease_manager): """ Attempts to renew a potential lease if possible and - marks in the queue as none adds to adds to the queue + marks in the queue as none adds to adds to the queue. """ try: possible_lease = await lease_task diff --git a/azure/eventprocessorhost/partition_pump.py b/azure/eventprocessorhost/partition_pump.py index 1327463..26e62b0 100644 --- a/azure/eventprocessorhost/partition_pump.py +++ b/azure/eventprocessorhost/partition_pump.py @@ -14,7 +14,7 @@ class PartitionPump(): """ - Manages individual connection to a given partition + Manages individual connection to a given partition. """ def __init__(self, host, lease): @@ -27,21 +27,22 @@ def __init__(self, host, lease): def run(self): """ - Makes pump sync so that it can be run in a thread + Makes pump sync so that it can be run in a thread. """ self.loop = asyncio.new_event_loop() self.loop.run_until_complete(self.open_async()) def set_pump_status(self, status): """ - Updates pump status and logs update to console + Updates pump status and logs update to console. """ self.pump_status = status _logger.info("{} partition {}".format(status, self.lease.partition_id)) def set_lease(self, new_lease): """ - Sets a new partition lease to be processed by the pump + Sets a new partition lease to be processed by the pump. + :param lease: The lease to set. :type lease: ~azure.eventprocessorhost.lease.Lease """ @@ -50,7 +51,7 @@ def set_lease(self, new_lease): async def open_async(self): """ - Opens partition pump + Opens partition pump. 
""" self.set_pump_status("Opening") self.partition_context = PartitionContext(self.host, self.lease.partition_id, @@ -82,13 +83,15 @@ async def on_open_async(self): def is_closing(self): """ Returns whether pump is closing. - :returns: bool + + :rtype: bool """ return self.pump_status == "Closing" or self.pump_status == "Closed" async def close_async(self, reason): """ Safely closes the pump. + :param reason: The reason for the shutdown. :type reason: str """ @@ -122,6 +125,7 @@ async def close_async(self, reason): async def on_closing_async(self, reason): """ Event handler for on closing event. + :param reason: The reason for the shutdown. :type reason: str """ @@ -130,8 +134,9 @@ async def on_closing_async(self, reason): async def process_events_async(self, events): """ Process pump events. + :param events: List of events to be processed. - :type events: list of ~azure.eventhub.EventData + :type events: list[~azure.eventhub.EventData] """ if events: # Synchronize to serialize calls to the processor. The handler is not installed until @@ -149,6 +154,7 @@ async def process_events_async(self, events): async def process_error_async(self, error): """ Passes error to the event processor for processing. + :param error: An error the occurred. 
:type error: Exception """ From a82bdf274ed2931d70432ef04da3684b9d13547f Mon Sep 17 00:00:00 2001 From: antisch Date: Mon, 25 Jun 2018 21:52:33 -0700 Subject: [PATCH 04/11] Started iothub support --- .vscode/settings.json | 4 ++ HISTORY.rst | 10 +++++ azure/eventhub/__init__.py | 70 +++++++++++++++++++------------- azure/eventhub/async/__init__.py | 19 ++++----- setup.py | 2 +- 5 files changed, 64 insertions(+), 41 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..1097d23 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,4 @@ +{ + "python.pythonPath": "${workspaceFolder}/env36/bin/python", + "python.linting.enabled": false +} \ No newline at end of file diff --git a/HISTORY.rst b/HISTORY.rst index 4fa8fac..ff1f04f 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -3,6 +3,16 @@ Release History =============== +0.2.0rc1 (unreleased) ++++++++++++++++++++++ + +- Updated uAMQP dependency to vRC2 +- Added support for constructing IoTHub connections. +- Removed optional `callback` argument from `Receiver.receive` and `AsyncReceiver.receive`. + This removes the potential for messages to be processed via callback for not yet returned + in the batch. 
+ + 0.2.0b2 (2018-05-29) ++++++++++++++++++++ diff --git a/azure/eventhub/__init__.py b/azure/eventhub/__init__.py index 7892358..56ad301 100644 --- a/azure/eventhub/__init__.py +++ b/azure/eventhub/__init__.py @@ -11,11 +11,9 @@ import time import asyncio try: - from urllib import urlparse - from urllib import unquote_plus + from urllib import urlparse, unquote_plus, urlencode except ImportError: - from urllib.parse import unquote_plus - from urllib.parse import urlparse + from urllib.parse import urlparse, unquote_plus, urlencode import uamqp from uamqp import Connection @@ -52,6 +50,29 @@ def _parse_conn_str(conn_str): return endpoint, shared_access_key_name, shared_access_key, entity_path +def _generate_sas_token(uri, policy, key, expiry=None): + """Create a shared access signiture token as a string literal. + :returns: SAS token as string literal. + :rtype: str + """ + from base64 import b64encode, b64decode + from hashlib import sha256 + from hmac import HMAC + if not expiry: + expiry = time.time() + 3600 # Default to 1 hour. 
+ encoded_uri = quote_plus(uri) + ttl = int(expiry) + sign_key = '%s\n%d' % (encoded_uri, ttl) + signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) + result = { + 'sr': uri, + 'sig': signature, + 'se': str(ttl)} + if policy: + result['skn'] = policy + return 'SharedAccessSignature ' + urlencode(result) + + def _build_uri(address, entity): parsed = urlparse(address) if parsed.path: @@ -117,6 +138,14 @@ def from_connection_string(cls, conn_str, eventhub=None, **kwargs): address = _build_uri(address, entity) return cls(address, username=policy, password=key, **kwargs) + @classmethod + def from_iothub_connection_string(cls, conn_str, **kwargs) + address, policy, key, _ = _parse_conn_str(conn_str) + hub_name = address.split('.')[0] + username = "{}@sas.root.{}".format(policy, hub_name) + password = _generate_sas_token(address, policy, key) + return cls(address, username=username, password=password, **kwargs) + def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self-use """ Create an ~uamqp.authentication.SASTokenAuth instance to authenticate @@ -411,7 +440,6 @@ def __init__(self, client, source, prefetch=300, epoch=None): :type epoch: int """ self.offset = None - self._callback = None self.prefetch = prefetch self.epoch = epoch properties = None @@ -437,22 +465,7 @@ def queue_size(self): return self._handler._received_messages.qsize() return 0 - def on_message(self, event): - """ - Callback to process a received message and wrap it in EventData. - Will also call a user supplied callback. - - :param event: The received message. - :type event: ~uamqp.message.Message - :rtype: ~azure.eventhub.EventData. - """ - event_data = EventData(message=event) - if self._callback: - self._callback(event_data) - self.offset = event_data.offset - return event_data - - def receive(self, max_batch_size=None, callback=None, timeout=None): + def receive(self, max_batch_size=None, timeout=None): """ Receive events from the EventHub. 
@@ -462,20 +475,19 @@ def receive(self, max_batch_size=None, callback=None, timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. :type max_batch_size: int - :param callback: A callback to be run for each received event. This must - be a function that accepts a single argument - the event data. This callback - will be run before the message is returned in the result generator. - :type callback: func[~azure.eventhub.EventData] :rtype: list[~azure.eventhub.EventData] """ try: timeout_ms = 1000 * timeout if timeout else 0 - self._callback = callback - batch = self._handler.receive_message_batch( + message_batch = self._handler.receive_message_batch( max_batch_size=max_batch_size, - on_message_received=self.on_message, timeout=timeout_ms) - return batch + data_batch = [] + for message in message_batch: + event_data = EventData(message=event) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch except errors.AMQPConnectionError as e: message = "Failed to open receiver: {}".format(e) message += "\nPlease check that the partition key is valid " diff --git a/azure/eventhub/async/__init__.py b/azure/eventhub/async/__init__.py index e2e80d5..e03b2e8 100644 --- a/azure/eventhub/async/__init__.py +++ b/azure/eventhub/async/__init__.py @@ -4,7 +4,6 @@ # -------------------------------------------------------------------------------------------- import logging -import queue import asyncio import time import datetime @@ -257,7 +256,6 @@ def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pyli """ self.loop = loop or asyncio.get_event_loop() self.offset = None - self._callback = None self.prefetch = prefetch self.epoch = epoch properties = None @@ -272,7 +270,7 @@ def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pyli timeout=self.timeout, loop=self.loop) - async def receive(self, max_batch_size=None, callback=None, 
timeout=None): + async def receive(self, max_batch_size=None, timeout=None): """ Receive events asynchronously from the EventHub. @@ -282,20 +280,19 @@ async def receive(self, max_batch_size=None, callback=None, timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. :type max_batch_size: int - :param callback: A callback to be run for each received event. This must - be a function that accepts a single argument - the event data. This callback - will be run before the message is returned in the result generator. - :type callback: func[~azure.eventhub.EventData] :rtype: list[~azure.eventhub.EventData] """ try: - self._callback = callback timeout_ms = 1000 * timeout if timeout else 0 - batch = await self._handler.receive_message_batch_async( + message_batch = await self._handler.receive_message_batch_async( max_batch_size=max_batch_size, - on_message_received=self.on_message, timeout=timeout_ms) - return batch + data_batch = [] + for message in message_batch: + event_data = EventData(message=event) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch except errors.AMQPConnectionError as e: message = "Failed to open receiver: {}".format(e) message += "\nPlease check that the partition key is valid " diff --git a/setup.py b/setup.py index 1cc16ce..b6275ea 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,7 @@ zip_safe=False, packages=find_packages(exclude=["examples", "tests"]), install_requires=[ - 'uamqp==0.1.0rc1', + 'uamqp==0.1.0rc2', 'msrestazure~=0.4.11', 'azure-common~=1.1', 'azure-storage~=0.36.0' From f0e841887dc15f432cb3c6becd617b2b9ec26c8d Mon Sep 17 00:00:00 2001 From: annatisch Date: Tue, 26 Jun 2018 07:49:15 -0700 Subject: [PATCH 05/11] Fixed long running test --- tests/test_longrunning_receive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_longrunning_receive.py b/tests/test_longrunning_receive.py index 
6763a24..b5519e3 100644 --- a/tests/test_longrunning_receive.py +++ b/tests/test_longrunning_receive.py @@ -41,7 +41,7 @@ async def pump(_pid, receiver, _args, _dl): print("{}: No events received, queue size {}, delivered {}".format( _pid, receiver.queue_size, - receiver.delivered)) + total)) elif iteration >= 80: iteration = 0 print("{}: total received {}, last sn={}, last offset={}".format( From 808a63880f41bcc8c7dc00c9d6724999d9c85a40 Mon Sep 17 00:00:00 2001 From: annatisch Date: Tue, 26 Jun 2018 09:37:08 -0700 Subject: [PATCH 06/11] Fixed typo and memory leak --- azure/eventhub/__init__.py | 11 +++++------ azure/eventhub/async/__init__.py | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/azure/eventhub/__init__.py b/azure/eventhub/__init__.py index 56ad301..47a37ec 100644 --- a/azure/eventhub/__init__.py +++ b/azure/eventhub/__init__.py @@ -139,7 +139,7 @@ def from_connection_string(cls, conn_str, eventhub=None, **kwargs): return cls(address, username=policy, password=key, **kwargs) @classmethod - def from_iothub_connection_string(cls, conn_str, **kwargs) + def from_iothub_connection_string(cls, conn_str, **kwargs): address, policy, key, _ = _parse_conn_str(conn_str) hub_name = address.split('.')[0] username = "{}@sas.root.{}".format(policy, hub_name) @@ -484,7 +484,7 @@ def receive(self, max_batch_size=None, timeout=None): timeout=timeout_ms) data_batch = [] for message in message_batch: - event_data = EventData(message=event) + event_data = EventData(message=message) self.offset = event_data.offset data_batch.append(event_data) return data_batch @@ -540,15 +540,12 @@ def __init__(self, body=None, batch=None, message=None): self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) self._annotations = {} self._properties = {} - self._header = MessageHeader() - self._header.durable = True if batch: self.message = BatchMessage(data=batch, multi_messages=True) elif message: self.message = message self._annotations = 
message.annotations self._properties = message.application_properties - self._header = message.header else: if isinstance(body, list) and body: self.message = Message(body[0]) @@ -625,8 +622,10 @@ def partition_key(self, value): """ annotations = dict(self._annotations) annotations[self._partition_key] = value + header = MessageHeader() + header.durable = True self.message.annotations = annotations - self.message.header = self._header + self.message.header = header self._annotations = annotations @property diff --git a/azure/eventhub/async/__init__.py b/azure/eventhub/async/__init__.py index e03b2e8..8e978b5 100644 --- a/azure/eventhub/async/__init__.py +++ b/azure/eventhub/async/__init__.py @@ -289,7 +289,7 @@ async def receive(self, max_batch_size=None, timeout=None): timeout=timeout_ms) data_batch = [] for message in message_batch: - event_data = EventData(message=event) + event_data = EventData(message=message) self.offset = event_data.offset data_batch.append(event_data) return data_batch From 04b7f9e2b9cab111a88d92a34975e1c398ed2950 Mon Sep 17 00:00:00 2001 From: annatisch Date: Mon, 2 Jul 2018 09:50:50 -0700 Subject: [PATCH 07/11] Restructure --- HISTORY.rst | 4 + azure/eventhub/__init__.py | 713 +----------------- azure/eventhub/{async => _async}/__init__.py | 152 +--- azure/eventhub/_async/receiver_async.py | 85 +++ azure/eventhub/_async/sender_async.py | 60 ++ azure/eventhub/client.py | 325 ++++++++ azure/eventhub/common.py | 207 +++++ azure/eventhub/receiver.py | 105 +++ azure/eventhub/sender.py | 99 +++ azure/eventprocessorhost/eh_partition_pump.py | 3 +- azure/eventprocessorhost/partition_manager.py | 2 +- examples/recv_async.py | 3 +- examples/recv_epoch.py | 3 +- examples/send_async.py | 3 +- setup.cfg | 2 - tests/test_negative.py | 8 +- tests/test_receive_async.py | 5 +- tests/test_send_async.py | 4 +- 18 files changed, 930 insertions(+), 853 deletions(-) rename azure/eventhub/{async => _async}/__init__.py (56%) create mode 100644 
azure/eventhub/_async/receiver_async.py create mode 100644 azure/eventhub/_async/sender_async.py create mode 100644 azure/eventhub/client.py create mode 100644 azure/eventhub/common.py create mode 100644 azure/eventhub/receiver.py create mode 100644 azure/eventhub/sender.py delete mode 100644 setup.cfg diff --git a/HISTORY.rst b/HISTORY.rst index ff1f04f..3d567d5 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -6,11 +6,15 @@ Release History 0.2.0rc1 (unreleased) +++++++++++++++++++++ +- **Breaking change** Restructured library to support Python 3.7. Submodule `async` has been renamed and all classes from + this module can now be imported from azure.eventhub directly. - Updated uAMQP dependency to vRC2 - Added support for constructing IoTHub connections. - Removed optional `callback` argument from `Receiver.receive` and `AsyncReceiver.receive`. This removes the potential for messages to be processed via callback for not yet returned in the batch. +- Fixed memory leak in receive operations. +- Dropped Python 2.7 wheel support. 0.2.0b2 (2018-05-29) diff --git a/azure/eventhub/__init__.py b/azure/eventhub/__init__.py index 47a37ec..d730dcb 100644 --- a/azure/eventhub/__init__.py +++ b/azure/eventhub/__init__.py @@ -3,708 +3,17 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -import logging -import datetime -import sys -import threading -import uuid -import time -import asyncio -try: - from urllib import urlparse, unquote_plus, urlencode -except ImportError: - from urllib.parse import urlparse, unquote_plus, urlencode - -import uamqp -from uamqp import Connection -from uamqp import SendClient, ReceiveClient -from uamqp import Message, BatchMessage -from uamqp import Source, Target -from uamqp import authentication -from uamqp import constants, types, errors -from uamqp.message import MessageHeader - - __version__ = "0.2.0b2" -log = logging.getLogger(__name__) - - -def _parse_conn_str(conn_str): - endpoint = None - shared_access_key_name = None - shared_access_key = None - entity_path = None - for element in conn_str.split(';'): - key, _, value = element.partition('=') - if key.lower() == 'endpoint': - endpoint = value.rstrip('/') - elif key.lower() == 'sharedaccesskeyname': - shared_access_key_name = value - elif key.lower() == 'sharedaccesskey': - shared_access_key = value - elif key.lower() == 'entitypath': - entity_path = value - if not all([endpoint, shared_access_key_name, shared_access_key]): - raise ValueError("Invalid connection string") - return endpoint, shared_access_key_name, shared_access_key, entity_path - - -def _generate_sas_token(uri, policy, key, expiry=None): - """Create a shared access signiture token as a string literal. - :returns: SAS token as string literal. - :rtype: str - """ - from base64 import b64encode, b64decode - from hashlib import sha256 - from hmac import HMAC - if not expiry: - expiry = time.time() + 3600 # Default to 1 hour. 
- encoded_uri = quote_plus(uri) - ttl = int(expiry) - sign_key = '%s\n%d' % (encoded_uri, ttl) - signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) - result = { - 'sr': uri, - 'sig': signature, - 'se': str(ttl)} - if policy: - result['skn'] = policy - return 'SharedAccessSignature ' + urlencode(result) - - -def _build_uri(address, entity): - parsed = urlparse(address) - if parsed.path: - return address - if not entity: - raise ValueError("No EventHub specified") - address += "/" + str(entity) - return address - - -class EventHubClient(object): - """ - The EventHubClient class defines a high level interface for sending - events to and receiving events from the Azure Event Hubs service. - """ - - def __init__(self, address, username=None, password=None, debug=False): - """ - Constructs a new EventHubClient with the given address URL. - - :param address: The full URI string of the Event Hub. This can optionally - include URL-encoded access name and key. - :type address: str - :param username: The name of the shared access policy. This must be supplied - if not encoded into the address. - :type username: str - :param password: The shared access key. This must be supplied if not encoded - into the address. - :type password: str - :param debug: Whether to output network trace logs to the logger. Default - is `False`. 
- :type debug: bool - """ - self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] - self.address = urlparse(address) - url_username = unquote_plus(self.address.username) if self.address.username else None - username = username or url_username - url_password = unquote_plus(self.address.password) if self.address.password else None - password = password or url_password - if not username or not password: - raise ValueError("Missing username and/or password.") - auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) - self.auth = self._create_auth(auth_uri, username, password) - self.connection = None - self.debug = debug - - self.clients = [] - self.stopped = False - log.info("{}: Created the Event Hub client".format(self.container_id)) - - @classmethod - def from_connection_string(cls, conn_str, eventhub=None, **kwargs): - """ - Create an EventHubClient from a connection string. - - :param conn_str: The connection string. - :type conn_str: str - :param eventhub: The name of the EventHub, if the EntityName is - not included in the connection string. - """ - address, policy, key, entity = _parse_conn_str(conn_str) - entity = eventhub or entity - address = _build_uri(address, entity) - return cls(address, username=policy, password=key, **kwargs) - - @classmethod - def from_iothub_connection_string(cls, conn_str, **kwargs): - address, policy, key, _ = _parse_conn_str(conn_str) - hub_name = address.split('.')[0] - username = "{}@sas.root.{}".format(policy, hub_name) - password = _generate_sas_token(address, policy, key) - return cls(address, username=username, password=password, **kwargs) - - def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self-use - """ - Create an ~uamqp.authentication.SASTokenAuth instance to authenticate - the session. - - :param auth_uri: The URI to authenticate against. - :type auth_uri: str - :param username: The name of the shared access policy. 
- :type username: str - :param password: The shared access key. - :type password: str - """ - return authentication.SASTokenAuth.from_shared_access_key(auth_uri, username, password) - - def _create_properties(self): # pylint: disable=no-self-use - """ - Format the properties with which to instantiate the connection. - This acts like a user agent over HTTP. - - :rtype: dict - """ - properties = {} - properties["product"] = "eventhub.python" - properties["version"] = __version__ - properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3]) - properties["platform"] = sys.platform - return properties - - def _create_connection(self): - """ - Create a new ~uamqp.connection.Connection instance that will be shared between all - Sender/Receiver clients. - """ - if not self.connection: - log.info("{}: Creating connection with address={}".format( - self.container_id, self.address.geturl())) - self.connection = Connection( - self.address.hostname, - self.auth, - container_id=self.container_id, - properties=self._create_properties(), - debug=self.debug) - - def _close_connection(self): - """ - Close and destroy the connection. - """ - if self.connection: - self.connection.destroy() - self.connection = None - - def _close_clients(self): - """ - Close all open Sender/Receiver clients. - """ - for client in self.clients: - client.close() - - def run(self): - """ - Run the EventHubClient in blocking mode. - Opens the connection and starts running all Sender/Receiver clients. - - :rtype: ~azure.eventhub.EventHubClient - """ - log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) - self._create_connection() - for client in self.clients: - client.open(connection=self.connection) - return self - - def stop(self): - """ - Stop the EventHubClient and all its Sender/Receiver clients. 
- """ - log.info("{}: Stopping {} clients".format(self.container_id, len(self.clients))) - self.stopped = True - self._close_clients() - self._close_connection() - - def get_eventhub_info(self): - """ - Get details on the specified EventHub. - Keys in the details dictionary include: - -'name' - -'type' - -'created_at' - -'partition_count' - -'partition_ids' - - :rtype: dict - """ - self._create_connection() - eh_name = self.address.path.lstrip('/') - target = "amqps://{}/{}".format(self.address.hostname, eh_name) - mgmt_client = uamqp.AMQPClient(target, auth=self.auth, debug=self.debug) - mgmt_client.open(self.connection) - try: - mgmt_msg = Message(application_properties={'name': eh_name}) - response = mgmt_client.mgmt_request( - mgmt_msg, - constants.READ_OPERATION, - op_type=b'com.microsoft:eventhub', - status_code_field=b'status-code', - description_fields=b'status-description') - eh_info = response.get_data() - output = {} - if eh_info: - output['name'] = eh_info[b'name'].decode('utf-8') - output['type'] = eh_info[b'type'].decode('utf-8') - output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) - output['partition_count'] = eh_info[b'partition_count'] - output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] - return output - except: - raise - finally: - mgmt_client.close() - - def add_receiver(self, consumer_group, partition, offset=None, prefetch=300): - """ - Add a receiver to the client for a particular consumer group and partition. - - :param consumer_group: The name of the consumer group. - :type consumer_group: str - :param partition: The ID of the partition. - :type partition: str - :param offset: The offset from which to start receiving. - :type offset: ~azure.eventhub.Offset - :param prefetch: The message prefetch count of the receiver. Default is 300. 
- :type prefetch: int - :rtype: ~azure.eventhub.Receiver - """ - source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, self.address.path, consumer_group, partition) - source = Source(source_url) - if offset is not None: - source.set_filter(offset.selector()) - handler = Receiver(self, source, prefetch=prefetch) - self.clients.append(handler._handler) # pylint: disable=protected-access - return handler - - def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300): - """ - Add a receiver to the client with an epoch value. Only a single epoch receiver - can connect to a partition at any given time - additional epoch receivers must have - a higher epoch value or they will be rejected. If a 2nd epoch receiver has - connected, the first will be closed. - - :param consumer_group: The name of the consumer group. - :type consumer_group: str - :param partition: The ID of the partition. - :type partition: str - :param epoch: The epoch value for the receiver. - :type epoch: int - :param prefetch: The message prefetch count of the receiver. Default is 300. - :type prefetch: int - :rtype: ~azure.eventhub.Receiver - """ - source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, self.address.path, consumer_group, partition) - handler = Receiver(self, source_url, prefetch=prefetch, epoch=epoch) - self.clients.append(handler._handler) # pylint: disable=protected-access - return handler - - def add_sender(self, partition=None): - """ - Add a sender to the client to send ~azure.eventhub.EventData object - to an EventHub. - - :param partition: Optionally specify a particular partition to send to. - If omitted, the events will be distributed to available partitions via - round-robin. 
- :type parition: str - :rtype: ~azure.eventhub.Sender - """ - target = "amqps://{}{}".format(self.address.hostname, self.address.path) - handler = Sender(self, target, partition=partition) - self.clients.append(handler._handler) # pylint: disable=protected-access - return handler - - -class Sender: - """ - Implements a Sender. - """ - TIMEOUT = 60.0 - - def __init__(self, client, target, partition=None): - """ - Instantiate an EventHub event Sender client. - - :param client: The parent EventHubClient. - :type client: ~azure.eventhub.EventHubClient. - :param target: The URI of the EventHub to send to. - :type target: str - """ - self.partition = partition - if partition: - target += "/Partitions/" + partition - self._handler = SendClient( - target, - auth=client.auth, - debug=client.debug, - msg_timeout=Sender.TIMEOUT) - self._outcome = None - self._condition = None - - def send(self, event_data): - """ - Sends an event data and blocks until acknowledgement is - received or operation times out. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.EventData - :raises: ~azure.eventhub.EventHubError if the message fails to - send. - :return: The outcome of the message send. - :rtype: ~uamqp.constants.MessageSendResult - """ - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - event_data.message.on_send_complete = self._on_outcome - try: - self._handler.send_message(event_data.message) - if self._outcome != constants.MessageSendResult.Ok: - raise Sender._error(self._outcome, self._condition) - except Exception as e: - raise EventHubError("Send failed: {}".format(e)) - else: - return self._outcome - - def transfer(self, event_data, callback=None): - """ - Transfers an event data and notifies the callback when the operation is done. - - :param event_data: The event to be sent. 
- :type event_data: ~azure.eventhub.EventData - :param callback: Callback to be run once the message has been send. - This must be a function that accepts two arguments. - :type callback: func[~uamqp.constants.MessageSendResult, ~azure.eventhub.EventHubError] - """ - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - if callback: - event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c)) - self._handler.queue_message(event_data.message) - - def wait(self): - """ - Wait until all transferred events have been sent. - """ - try: - self._handler.wait() - except Exception as e: - raise EventHubError("Send failed: {}".format(e)) +from azure.eventhub.common import EventData, EventHubError, Offset +from azure.eventhub.client import EventHubClient +from azure.eventhub.sender import Sender +from azure.eventhub.receiver import Receiver - def _on_outcome(self, outcome, condition): - """ - Called when the outcome is received for a delivery. - - :param outcome: The outcome of the message delivery - success or failure. - :type outcome: ~uamqp.constants.MessageSendResult - """ - self._outcome = outcome - self._condition = condition - - @staticmethod - def _error(outcome, condition): - return None if outcome == constants.MessageSendResult.Ok else EventHubError(outcome, condition) - - -class Receiver: - """ - Implements a Receiver. - """ - timeout = 0 - _epoch = b'com.microsoft:epoch' - - def __init__(self, client, source, prefetch=300, epoch=None): - """ - Instantiate a receiver. - - :param client: The parent EventHubClient. - :type client: ~azure.eventhub.EventHubClient - :param source: The source EventHub from which to receive events. - :type source: ~uamqp.address.Source - :param prefetch: The number of events to prefetch from the service - for processing. Default is 300. - :type prefetch: int - :param epoch: An optional epoch value. 
- :type epoch: int - """ - self.offset = None - self.prefetch = prefetch - self.epoch = epoch - properties = None - if epoch: - properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} - self._handler = ReceiveClient( - source, - auth=client.auth, - debug=client.debug, - prefetch=self.prefetch, - link_properties=properties, - timeout=self.timeout) - - @property - def queue_size(self): - """ - The current size of the unprocessed message queue. - - :rtype: int - """ - # pylint: disable=protected-access - if self._handler._received_messages: - return self._handler._received_messages.qsize() - return 0 - - def receive(self, max_batch_size=None, timeout=None): - """ - Receive events from the EventHub. - - :param max_batch_size: Receive a batch of events. Batch size will - be up to the maximum specified, but will return as soon as service - returns no new events. If combined with a timeout and no events are - retrieve before the time, the result will be empty. If no batch - size is supplied, the prefetch size will be the maximum. 
- :type max_batch_size: int - :rtype: list[~azure.eventhub.EventData] - """ - try: - timeout_ms = 1000 * timeout if timeout else 0 - message_batch = self._handler.receive_message_batch( - max_batch_size=max_batch_size, - timeout=timeout_ms) - data_batch = [] - for message in message_batch: - event_data = EventData(message=message) - self.offset = event_data.offset - data_batch.append(event_data) - return data_batch - except errors.AMQPConnectionError as e: - message = "Failed to open receiver: {}".format(e) - message += "\nPlease check that the partition key is valid " - if self.epoch: - message += ("and that a higher epoch receiver is not " - "already running for this partition.") - else: - message += ("and whether an epoch receiver is " - "already running for this partition.") - raise EventHubError(message) - except Exception as e: - raise EventHubError("Receive failed: {}".format(e)) - - def selector(self, default): - """ - Create a selector for the current offset if it is set. - - :param default: The fallback receive offset. - :type default: ~azure.eventhub.Offset - :rtype: ~azure.eventhub.Offset - """ - if self.offset is not None: - return Offset(self.offset).selector() - return default - - -class EventData(object): - """ - The EventData class is a holder of event content. - Acts as a wrapper to an ~uamqp.message.Message object. - """ - - PROP_SEQ_NUMBER = b"x-opt-sequence-number" - PROP_OFFSET = b"x-opt-offset" - PROP_PARTITION_KEY = b"x-opt-partition-key" - PROP_TIMESTAMP = b"x-opt-enqueued-time" - PROP_DEVICE_ID = b"iothub-connection-device-id" - - def __init__(self, body=None, batch=None, message=None): - """ - Initialize EventData. - - :param body: The data to send in a single message. - :type body: str, bytes or list - :param batch: A data generator to send batched messages. - :type batch: Generator - :param message: The received message. 
- :type message: ~uamqp.message.Message - """ - self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) - self._annotations = {} - self._properties = {} - if batch: - self.message = BatchMessage(data=batch, multi_messages=True) - elif message: - self.message = message - self._annotations = message.annotations - self._properties = message.application_properties - else: - if isinstance(body, list) and body: - self.message = Message(body[0]) - for more in body[1:]: - self.message._body.append(more) # pylint: disable=protected-access - elif body is None: - raise ValueError("EventData cannot be None.") - else: - self.message = Message(body) - - - @property - def sequence_number(self): - """ - The sequence number of the event data object. - - :rtype: int - """ - return self._annotations.get(EventData.PROP_SEQ_NUMBER, None) - - @property - def offset(self): - """ - The offset of the event data object. - - :rtype: int - """ - try: - return self._annotations[EventData.PROP_OFFSET].decode('UTF-8') - except (KeyError, AttributeError): - return None - - @property - def enqueued_time(self): - """ - The enqueued timestamp of the event data object. - - :rtype: datetime.datetime - """ - timestamp = self._annotations.get(EventData.PROP_TIMESTAMP, None) - if timestamp: - return datetime.datetime.fromtimestamp(float(timestamp)/1000) - return None - - @property - def device_id(self): - """ - The device ID of the event data object. This is only used for - IoT Hub implementations. - - :rtype: bytes - """ - return self._annotations.get(EventData.PROP_DEVICE_ID, None) - - @property - def partition_key(self): - """ - The partition key of the event data object. - - :rtype: bytes - """ - try: - return self._annotations[self._partition_key] - except KeyError: - return self._annotations.get(EventData.PROP_PARTITION_KEY, None) - - @partition_key.setter - def partition_key(self, value): - """ - Set the partition key of the event data object. 
- - :param value: The partition key to set. - :type value: str or bytes - """ - annotations = dict(self._annotations) - annotations[self._partition_key] = value - header = MessageHeader() - header.durable = True - self.message.annotations = annotations - self.message.header = header - self._annotations = annotations - - @property - def properties(self): - """ - Application defined properties on the message. - - :rtype: dict - """ - return self._properties - - @properties.setter - def properties(self, value): - """ - Application defined properties on the message. - - :param value: The application properties for the EventData. - :type value: dict - """ - self._properties = value - properties = dict(self._properties) - self.message.application_properties = properties - - @property - def body(self): - """ - The body of the event data object. - - :rtype: bytes or generator[bytes] - """ - return self.message.get_data() - - -class Offset(object): - """ - The offset (position or timestamp) where a receiver starts. Examples: - Beginning of the event stream: - >>> offset = Offset("-1") - End of the event stream: - >>> offset = Offset("@latest") - Events after the specified offset: - >>> offset = Offset("12345") - Events from the specified offset: - >>> offset = Offset("12345", True) - Events after a datetime: - >>> offset = Offset(datetime.datetime.utcnow()) - Events after a specific sequence number: - >>> offset = Offset(1506968696002) - """ - - def __init__(self, value, inclusive=False): - """ - Initialize Offset. - - :param value: The offset value. - :type value: ~datetime.datetime or int or str - :param inclusive: Whether to include the supplied value as the start point. - :type inclusive: bool - """ - self.value = value - self.inclusive = inclusive - - def selector(self): - """ - Creates a selector expression of the offset. 
- - :rtype: bytes - """ - operator = ">=" if self.inclusive else ">" - if isinstance(self.value, datetime.datetime): - timestamp = (time.mktime(self.value.timetuple()) * 1000) + (self.value.microsecond/1000) - return ("amqp.annotation.x-opt-enqueued-time {} '{}'".format(operator, int(timestamp))).encode('utf-8') - elif isinstance(self.value, int): - return ("amqp.annotation.x-opt-sequence-number {} '{}'".format(operator, self.value)).encode('utf-8') - return ("amqp.annotation.x-opt-offset {} '{}'".format(operator, self.value)).encode('utf-8') - - -class EventHubError(Exception): - """ - Represents an error happened in the client. - """ - pass +try: + from azure.eventhub._async import ( + EventHubClientAsync, + AsyncSender, + AsyncReceiver) +except (ImportError, SyntaxError): + pass # Python 3 async features not supported diff --git a/azure/eventhub/async/__init__.py b/azure/eventhub/_async/__init__.py similarity index 56% rename from azure/eventhub/async/__init__.py rename to azure/eventhub/_async/__init__.py index 8e978b5..5831051 100644 --- a/azure/eventhub/async/__init__.py +++ b/azure/eventhub/_async/__init__.py @@ -8,12 +8,16 @@ import time import datetime -from uamqp.async import SASTokenAsync -from uamqp.async import ConnectionAsync -from uamqp import Message, AMQPClientAsync, SendClientAsync, ReceiveClientAsync, Source from uamqp import constants, types, errors +from uamqp.authentication import SASTokenAsync +from uamqp import ( + Message, + Source, + ConnectionAsync, + AMQPClientAsync, + SendClientAsync, + ReceiveClientAsync) -from azure import eventhub from azure.eventhub import ( Sender, Receiver, @@ -21,6 +25,9 @@ EventData, EventHubError) +from .sender_async import AsyncSender +from .receiver_async import AsyncReceiver + log = logging.getLogger(__name__) @@ -33,7 +40,7 @@ class EventHubClientAsync(EventHubClient): def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self-use """ - Create an 
~uamqp.async.authentication_async.SASTokenAuthAsync instance to authenticate + Create an ~uamqp.authentication.cbs_auth_async.SASTokenAuthAsync instance to authenticate the session. :param auth_uri: The URI to authenticate against. @@ -47,7 +54,7 @@ def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self def _create_connection_async(self): """ - Create a new ~uamqp.async.connection_async.ConnectionAsync instance that will be shared between all + Create a new ~uamqp._async.connection_async.ConnectionAsync instance that will be shared between all AsyncSender/AsyncReceiver clients. """ if not self.connection: @@ -80,7 +87,7 @@ async def run_async(self): Run the EventHubClient asynchronously. Opens the connection and starts running all AsyncSender/AsyncReceiver clients. - :rtype: ~azure.eventhub.async.EventHubClientAsync + :rtype: ~azure.eventhub._async.EventHubClientAsync """ log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) self._create_connection_async() @@ -132,10 +139,10 @@ def add_async_receiver(self, consumer_group, partition, offset=None, prefetch=30 :param partition: The ID of the partition. :type partition: str :param offset: The offset from which to start receiving. - :type offset: ~azure.eventhub.Offset + :type offset: ~azure.eventhub.common.Offset :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int - :rtype: ~azure.eventhub.async.ReceiverAsync + :rtype: ~azure.eventhub._async.receiver_async.ReceiverAsync """ source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, self.address.path, consumer_group, partition) @@ -161,7 +168,7 @@ def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=30 :type epoch: int :param prefetch: The message prefetch count of the receiver. Default is 300. 
:type prefetch: int - :rtype: ~azure.eventhub.async.ReceiverAsync + :rtype: ~azure.eventhub._async.receiver_async.ReceiverAsync """ source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( self.address.hostname, self.address.path, consumer_group, partition) @@ -171,137 +178,16 @@ def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=30 def add_async_sender(self, partition=None, loop=None): """ - Add an async sender to the client to send ~azure.eventhub.EventData object + Add an async sender to the client to send ~azure.eventhub.common.EventData object to an EventHub. :param partition: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via round-robin. :type partition: str - :rtype: ~azure.eventhub.async.SenderAsync + :rtype: ~azure.eventhub._async.sender_async.SenderAsync """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) handler = AsyncSender(self, target, partition=partition, loop=loop) self.clients.append(handler._handler) # pylint: disable=protected-access return handler - -class AsyncSender(Sender): - """ - Implements the async API of a Sender. - """ - - def __init__(self, client, target, partition=None, loop=None): # pylint: disable=super-init-not-called - """ - Instantiate an EventHub event SenderAsync client. - - :param client: The parent EventHubClientAsync. - :type client: ~azure.eventhub.async.EventHubClientAsync - :param target: The URI of the EventHub to send to. - :type target: str - :param loop: An event loop. 
- """ - self.partition = partition - if partition: - target += "/Partitions/" + partition - self.loop = loop or asyncio.get_event_loop() - self._handler = SendClientAsync( - target, - auth=client.auth, - debug=client.debug, - msg_timeout=Sender.TIMEOUT, - loop=self.loop) - self._outcome = None - self._condition = None - - async def send(self, event_data): - """ - Sends an event data and asynchronously waits until - acknowledgement is received or operation times out. - - :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.EventData - :raises: ~azure.eventhub.EventHubError if the message fails to - send. - """ - if event_data.partition_key and self.partition: - raise ValueError("EventData partition key cannot be used with a partition sender.") - event_data.message.on_send_complete = self._on_outcome - try: - await self._handler.send_message_async(event_data.message) - if self._outcome != constants.MessageSendResult.Ok: - raise Sender._error(self._outcome, self._condition) - except Exception as e: - raise EventHubError("Send failed: {}".format(e)) - - -class AsyncReceiver(Receiver): - """ - Implements the async API of a Receiver. - """ - - def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pylint: disable=super-init-not-called - """ - Instantiate an async receiver. - - :param client: The parent EventHubClientAsync. - :type client: ~azure.eventhub.async.EventHubClientAsync - :param source: The source EventHub from which to receive events. - :type source: ~uamqp.address.Source - :param prefetch: The number of events to prefetch from the service - for processing. Default is 300. - :type prefetch: int - :param epoch: An optional epoch value. - :type epoch: int - :param loop: An event loop. 
- """ - self.loop = loop or asyncio.get_event_loop() - self.offset = None - self.prefetch = prefetch - self.epoch = epoch - properties = None - if epoch: - properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} - self._handler = ReceiveClientAsync( - source, - auth=client.auth, - debug=client.debug, - prefetch=self.prefetch, - link_properties=properties, - timeout=self.timeout, - loop=self.loop) - - async def receive(self, max_batch_size=None, timeout=None): - """ - Receive events asynchronously from the EventHub. - - :param max_batch_size: Receive a batch of events. Batch size will - be up to the maximum specified, but will return as soon as service - returns no new events. If combined with a timeout and no events are - retrieve before the time, the result will be empty. If no batch - size is supplied, the prefetch size will be the maximum. - :type max_batch_size: int - :rtype: list[~azure.eventhub.EventData] - """ - try: - timeout_ms = 1000 * timeout if timeout else 0 - message_batch = await self._handler.receive_message_batch_async( - max_batch_size=max_batch_size, - timeout=timeout_ms) - data_batch = [] - for message in message_batch: - event_data = EventData(message=message) - self.offset = event_data.offset - data_batch.append(event_data) - return data_batch - except errors.AMQPConnectionError as e: - message = "Failed to open receiver: {}".format(e) - message += "\nPlease check that the partition key is valid " - if self.epoch: - message += "and that a higher epoch receiver is " \ - "not already running for this partition." - else: - message += "and whether an epoch receiver is " \ - "already running for this partition." 
- raise EventHubError(message) - except Exception as e: - raise EventHubError("Receive failed: {}".format(e)) diff --git a/azure/eventhub/_async/receiver_async.py b/azure/eventhub/_async/receiver_async.py new file mode 100644 index 0000000..3c044d4 --- /dev/null +++ b/azure/eventhub/_async/receiver_async.py @@ -0,0 +1,85 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import asyncio + +from uamqp import errors, types +from uamqp import ReceiveClientAsync + +from azure.eventhub import EventHubError, EventData +from azure.eventhub.receiver import Receiver + + +class AsyncReceiver(Receiver): + """ + Implements the async API of a Receiver. + """ + + def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pylint: disable=super-init-not-called + """ + Instantiate an async receiver. + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub._async.EventHubClientAsync + :param source: The source EventHub from which to receive events. + :type source: ~uamqp.address.Source + :param prefetch: The number of events to prefetch from the service + for processing. Default is 300. + :type prefetch: int + :param epoch: An optional epoch value. + :type epoch: int + :param loop: An event loop. 
+ """ + self.loop = loop or asyncio.get_event_loop() + self.offset = None + self.prefetch = prefetch + self.epoch = epoch + properties = None + if epoch: + properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + self._handler = ReceiveClientAsync( + source, + auth=client.auth, + debug=client.debug, + prefetch=self.prefetch, + link_properties=properties, + timeout=self.timeout, + loop=self.loop) + + async def receive(self, max_batch_size=None, timeout=None): + """ + Receive events asynchronously from the EventHub. + + :param max_batch_size: Receive a batch of events. Batch size will + be up to the maximum specified, but will return as soon as service + returns no new events. If combined with a timeout and no events are + retrieved before the time, the result will be empty. If no batch + size is supplied, the prefetch size will be the maximum. + :type max_batch_size: int + :rtype: list[~azure.eventhub.EventData] + """ + try: + timeout_ms = 1000 * timeout if timeout else 0 + message_batch = await self._handler.receive_message_batch_async( + max_batch_size=max_batch_size, + timeout=timeout_ms) + data_batch = [] + for message in message_batch: + event_data = EventData(message=message) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch + except errors.AMQPConnectionError as e: + message = "Failed to open receiver: {}".format(e) + message += "\nPlease check that the partition key is valid " + if self.epoch: + message += "and that a higher epoch receiver is " \ + "not already running for this partition." + else: + message += "and whether an epoch receiver is " \ + "already running for this partition." 
+ raise EventHubError(message) + except Exception as e: + raise EventHubError("Receive failed: {}".format(e)) diff --git a/azure/eventhub/_async/sender_async.py b/azure/eventhub/_async/sender_async.py new file mode 100644 index 0000000..b702162 --- /dev/null +++ b/azure/eventhub/_async/sender_async.py @@ -0,0 +1,60 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import asyncio + +from uamqp import constants +from uamqp import SendClientAsync + +from azure.eventhub import EventHubError +from azure.eventhub.sender import Sender + +class AsyncSender(Sender): + """ + Implements the async API of a Sender. + """ + + def __init__(self, client, target, partition=None, loop=None): # pylint: disable=super-init-not-called + """ + Instantiate an EventHub event SenderAsync client. + + :param client: The parent EventHubClientAsync. + :type client: ~azure.eventhub._async.EventHubClientAsync + :param target: The URI of the EventHub to send to. + :type target: str + :param loop: An event loop. + """ + self.partition = partition + if partition: + target += "/Partitions/" + partition + self.loop = loop or asyncio.get_event_loop() + self._handler = SendClientAsync( + target, + auth=client.auth, + debug=client.debug, + msg_timeout=Sender.TIMEOUT, + loop=self.loop) + self._outcome = None + self._condition = None + + async def send(self, event_data): + """ + Sends an event data and asynchronously waits until + acknowledgement is received or operation times out. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.EventData + :raises: ~azure.eventhub.EventHubError if the message fails to + send. 
+ """ + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + event_data.message.on_send_complete = self._on_outcome + try: + await self._handler.send_message_async(event_data.message) + if self._outcome != constants.MessageSendResult.Ok: + raise Sender._error(self._outcome, self._condition) + except Exception as e: + raise EventHubError("Send failed: {}".format(e)) diff --git a/azure/eventhub/client.py b/azure/eventhub/client.py new file mode 100644 index 0000000..d0a6687 --- /dev/null +++ b/azure/eventhub/client.py @@ -0,0 +1,325 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import logging +import datetime +import sys +import uuid +import time +try: + from urllib import urlparse, unquote_plus, urlencode, quote_plus +except ImportError: + from urllib.parse import urlparse, unquote_plus, urlencode, quote_plus + +import uamqp +from uamqp import Connection +from uamqp import Message +from uamqp import Source +from uamqp import authentication +from uamqp import constants + +from azure.eventhub import __version__ +from azure.eventhub.sender import Sender +from azure.eventhub.receiver import Receiver + +log = logging.getLogger(__name__) + + +def _parse_conn_str(conn_str): + endpoint = None + shared_access_key_name = None + shared_access_key = None + entity_path = None + for element in conn_str.split(';'): + key, _, value = element.partition('=') + if key.lower() == 'endpoint': + endpoint = value.rstrip('/') + elif key.lower() == 'sharedaccesskeyname': + shared_access_key_name = value + elif key.lower() == 'sharedaccesskey': + shared_access_key = value + elif key.lower() == 'entitypath': 
+ entity_path = value + if not all([endpoint, shared_access_key_name, shared_access_key]): + raise ValueError("Invalid connection string") + return endpoint, shared_access_key_name, shared_access_key, entity_path + + +def _generate_sas_token(uri, policy, key, expiry=None): + """Create a shared access signature token as a string literal. + :returns: SAS token as string literal. + :rtype: str + """ + from base64 import b64encode, b64decode + from hashlib import sha256 + from hmac import HMAC + if not expiry: + expiry = time.time() + 3600 # Default to 1 hour. + encoded_uri = quote_plus(uri) + ttl = int(expiry) + sign_key = '%s\n%d' % (encoded_uri, ttl) + signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()) + result = { + 'sr': uri, + 'sig': signature, + 'se': str(ttl)} + if policy: + result['skn'] = policy + return 'SharedAccessSignature ' + urlencode(result) + + +def _build_uri(address, entity): + parsed = urlparse(address) + if parsed.path: + return address + if not entity: + raise ValueError("No EventHub specified") + address += "/" + str(entity) + return address + + +class EventHubClient(object): + """ + The EventHubClient class defines a high level interface for sending + events to and receiving events from the Azure Event Hubs service. + """ + + def __init__(self, address, username=None, password=None, debug=False): + """ + Constructs a new EventHubClient with the given address URL. + + :param address: The full URI string of the Event Hub. This can optionally + include URL-encoded access name and key. + :type address: str + :param username: The name of the shared access policy. This must be supplied + if not encoded into the address. + :type username: str + :param password: The shared access key. This must be supplied if not encoded + into the address. + :type password: str + :param debug: Whether to output network trace logs to the logger. Default + is `False`. 
+ :type debug: bool + """ + self.container_id = "eventhub.pysdk-" + str(uuid.uuid4())[:8] + self.address = urlparse(address) + url_username = unquote_plus(self.address.username) if self.address.username else None + username = username or url_username + url_password = unquote_plus(self.address.password) if self.address.password else None + password = password or url_password + if not username or not password: + raise ValueError("Missing username and/or password.") + auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) + self.auth = self._create_auth(auth_uri, username, password) + self.connection = None + self.debug = debug + + self.clients = [] + self.stopped = False + log.info("{}: Created the Event Hub client".format(self.container_id)) + + @classmethod + def from_connection_string(cls, conn_str, eventhub=None, **kwargs): + """ + Create an EventHubClient from a connection string. + + :param conn_str: The connection string. + :type conn_str: str + :param eventhub: The name of the EventHub, if the EntityName is + not included in the connection string. + """ + address, policy, key, entity = _parse_conn_str(conn_str) + entity = eventhub or entity + address = _build_uri(address, entity) + return cls(address, username=policy, password=key, **kwargs) + + @classmethod + def from_iothub_connection_string(cls, conn_str, **kwargs): + address, policy, key, _ = _parse_conn_str(conn_str) + hub_name = address.split('.')[0] + username = "{}@sas.root.{}".format(policy, hub_name) + password = _generate_sas_token(address, policy, key) + return cls(address, username=username, password=password, **kwargs) + + def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self-use + """ + Create an ~uamqp.authentication.SASTokenAuth instance to authenticate + the session. + + :param auth_uri: The URI to authenticate against. + :type auth_uri: str + :param username: The name of the shared access policy. 
+ :type username: str + :param password: The shared access key. + :type password: str + """ + return authentication.SASTokenAuth.from_shared_access_key(auth_uri, username, password) + + def _create_properties(self): # pylint: disable=no-self-use + """ + Format the properties with which to instantiate the connection. + This acts like a user agent over HTTP. + + :rtype: dict + """ + properties = {} + properties["product"] = "eventhub.python" + properties["version"] = __version__ + properties["framework"] = "Python {}.{}.{}".format(*sys.version_info[0:3]) + properties["platform"] = sys.platform + return properties + + def _create_connection(self): + """ + Create a new ~uamqp.connection.Connection instance that will be shared between all + Sender/Receiver clients. + """ + if not self.connection: + log.info("{}: Creating connection with address={}".format( + self.container_id, self.address.geturl())) + self.connection = Connection( + self.address.hostname, + self.auth, + container_id=self.container_id, + properties=self._create_properties(), + debug=self.debug) + + def _close_connection(self): + """ + Close and destroy the connection. + """ + if self.connection: + self.connection.destroy() + self.connection = None + + def _close_clients(self): + """ + Close all open Sender/Receiver clients. + """ + for client in self.clients: + client.close() + + def run(self): + """ + Run the EventHubClient in blocking mode. + Opens the connection and starts running all Sender/Receiver clients. + + :rtype: ~azure.eventhub.EventHubClient + """ + log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) + self._create_connection() + for client in self.clients: + client.open(connection=self.connection) + return self + + def stop(self): + """ + Stop the EventHubClient and all its Sender/Receiver clients. 
+ """ + log.info("{}: Stopping {} clients".format(self.container_id, len(self.clients))) + self.stopped = True + self._close_clients() + self._close_connection() + + def get_eventhub_info(self): + """ + Get details on the specified EventHub. + Keys in the details dictionary include: + -'name' + -'type' + -'created_at' + -'partition_count' + -'partition_ids' + + :rtype: dict + """ + self._create_connection() + eh_name = self.address.path.lstrip('/') + target = "amqps://{}/{}".format(self.address.hostname, eh_name) + mgmt_client = uamqp.AMQPClient(target, auth=self.auth, debug=self.debug) + mgmt_client.open(self.connection) + try: + mgmt_msg = Message(application_properties={'name': eh_name}) + response = mgmt_client.mgmt_request( + mgmt_msg, + constants.READ_OPERATION, + op_type=b'com.microsoft:eventhub', + status_code_field=b'status-code', + description_fields=b'status-description') + eh_info = response.get_data() + output = {} + if eh_info: + output['name'] = eh_info[b'name'].decode('utf-8') + output['type'] = eh_info[b'type'].decode('utf-8') + output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000) + output['partition_count'] = eh_info[b'partition_count'] + output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] + return output + except: + raise + finally: + mgmt_client.close() + + def add_receiver(self, consumer_group, partition, offset=None, prefetch=300): + """ + Add a receiver to the client for a particular consumer group and partition. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. + :type partition: str + :param offset: The offset from which to start receiving. + :type offset: ~azure.eventhub.Offset + :param prefetch: The message prefetch count of the receiver. Default is 300. 
+ :type prefetch: int + :rtype: ~azure.eventhub.Receiver + """ + source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( + self.address.hostname, self.address.path, consumer_group, partition) + source = Source(source_url) + if offset is not None: + source.set_filter(offset.selector()) + handler = Receiver(self, source, prefetch=prefetch) + self.clients.append(handler._handler) # pylint: disable=protected-access + return handler + + def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300): + """ + Add a receiver to the client with an epoch value. Only a single epoch receiver + can connect to a partition at any given time - additional epoch receivers must have + a higher epoch value or they will be rejected. If a 2nd epoch receiver has + connected, the first will be closed. + + :param consumer_group: The name of the consumer group. + :type consumer_group: str + :param partition: The ID of the partition. + :type partition: str + :param epoch: The epoch value for the receiver. + :type epoch: int + :param prefetch: The message prefetch count of the receiver. Default is 300. + :type prefetch: int + :rtype: ~azure.eventhub.Receiver + """ + source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( + self.address.hostname, self.address.path, consumer_group, partition) + handler = Receiver(self, source_url, prefetch=prefetch, epoch=epoch) + self.clients.append(handler._handler) # pylint: disable=protected-access + return handler + + def add_sender(self, partition=None): + """ + Add a sender to the client to send ~azure.eventhub.EventData object + to an EventHub. + + :param partition: Optionally specify a particular partition to send to. + If omitted, the events will be distributed to available partitions via + round-robin. 
+ :type parition: str + :rtype: ~azure.eventhub.Sender + """ + target = "amqps://{}{}".format(self.address.hostname, self.address.path) + handler = Sender(self, target, partition=partition) + self.clients.append(handler._handler) # pylint: disable=protected-access + return handler diff --git a/azure/eventhub/common.py b/azure/eventhub/common.py new file mode 100644 index 0000000..f0a7f92 --- /dev/null +++ b/azure/eventhub/common.py @@ -0,0 +1,207 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import datetime +import time + +from uamqp import Message, BatchMessage +from uamqp import types +from uamqp.message import MessageHeader + + +class EventData(object): + """ + The EventData class is a holder of event content. + Acts as a wrapper to an ~uamqp.message.Message object. + """ + + PROP_SEQ_NUMBER = b"x-opt-sequence-number" + PROP_OFFSET = b"x-opt-offset" + PROP_PARTITION_KEY = b"x-opt-partition-key" + PROP_TIMESTAMP = b"x-opt-enqueued-time" + PROP_DEVICE_ID = b"iothub-connection-device-id" + + def __init__(self, body=None, batch=None, message=None): + """ + Initialize EventData. + + :param body: The data to send in a single message. + :type body: str, bytes or list + :param batch: A data generator to send batched messages. + :type batch: Generator + :param message: The received message. 
+ :type message: ~uamqp.message.Message + """ + self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) + self._annotations = {} + self._properties = {} + if batch: + self.message = BatchMessage(data=batch, multi_messages=True) + elif message: + self.message = message + self._annotations = message.annotations + self._properties = message.application_properties + else: + if isinstance(body, list) and body: + self.message = Message(body[0]) + for more in body[1:]: + self.message._body.append(more) # pylint: disable=protected-access + elif body is None: + raise ValueError("EventData cannot be None.") + else: + self.message = Message(body) + + + @property + def sequence_number(self): + """ + The sequence number of the event data object. + + :rtype: int + """ + return self._annotations.get(EventData.PROP_SEQ_NUMBER, None) + + @property + def offset(self): + """ + The offset of the event data object. + + :rtype: int + """ + try: + return self._annotations[EventData.PROP_OFFSET].decode('UTF-8') + except (KeyError, AttributeError): + return None + + @property + def enqueued_time(self): + """ + The enqueued timestamp of the event data object. + + :rtype: datetime.datetime + """ + timestamp = self._annotations.get(EventData.PROP_TIMESTAMP, None) + if timestamp: + return datetime.datetime.fromtimestamp(float(timestamp)/1000) + return None + + @property + def device_id(self): + """ + The device ID of the event data object. This is only used for + IoT Hub implementations. + + :rtype: bytes + """ + return self._annotations.get(EventData.PROP_DEVICE_ID, None) + + @property + def partition_key(self): + """ + The partition key of the event data object. + + :rtype: bytes + """ + try: + return self._annotations[self._partition_key] + except KeyError: + return self._annotations.get(EventData.PROP_PARTITION_KEY, None) + + @partition_key.setter + def partition_key(self, value): + """ + Set the partition key of the event data object. 
+ + :param value: The partition key to set. + :type value: str or bytes + """ + annotations = dict(self._annotations) + annotations[self._partition_key] = value + header = MessageHeader() + header.durable = True + self.message.annotations = annotations + self.message.header = header + self._annotations = annotations + + @property + def properties(self): + """ + Application defined properties on the message. + + :rtype: dict + """ + return self._properties + + @properties.setter + def properties(self, value): + """ + Application defined properties on the message. + + :param value: The application properties for the EventData. + :type value: dict + """ + self._properties = value + properties = dict(self._properties) + self.message.application_properties = properties + + @property + def body(self): + """ + The body of the event data object. + + :rtype: bytes or generator[bytes] + """ + return self.message.get_data() + + +class Offset(object): + """ + The offset (position or timestamp) where a receiver starts. Examples: + Beginning of the event stream: + >>> offset = Offset("-1") + End of the event stream: + >>> offset = Offset("@latest") + Events after the specified offset: + >>> offset = Offset("12345") + Events from the specified offset: + >>> offset = Offset("12345", True) + Events after a datetime: + >>> offset = Offset(datetime.datetime.utcnow()) + Events after a specific sequence number: + >>> offset = Offset(1506968696002) + """ + + def __init__(self, value, inclusive=False): + """ + Initialize Offset. + + :param value: The offset value. + :type value: ~datetime.datetime or int or str + :param inclusive: Whether to include the supplied value as the start point. + :type inclusive: bool + """ + self.value = value + self.inclusive = inclusive + + def selector(self): + """ + Creates a selector expression of the offset. 
+ + :rtype: bytes + """ + operator = ">=" if self.inclusive else ">" + if isinstance(self.value, datetime.datetime): + timestamp = (time.mktime(self.value.timetuple()) * 1000) + (self.value.microsecond/1000) + return ("amqp.annotation.x-opt-enqueued-time {} '{}'".format(operator, int(timestamp))).encode('utf-8') + elif isinstance(self.value, int): + return ("amqp.annotation.x-opt-sequence-number {} '{}'".format(operator, self.value)).encode('utf-8') + return ("amqp.annotation.x-opt-offset {} '{}'".format(operator, self.value)).encode('utf-8') + + +class EventHubError(Exception): + """ + Represents an error happened in the client. + """ + pass diff --git a/azure/eventhub/receiver.py b/azure/eventhub/receiver.py new file mode 100644 index 0000000..c332bca --- /dev/null +++ b/azure/eventhub/receiver.py @@ -0,0 +1,105 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from uamqp import types, errors +from uamqp import ReceiveClient + +from azure.eventhub.common import EventHubError, EventData, Offset + + +class Receiver: + """ + Implements a Receiver. + """ + timeout = 0 + _epoch = b'com.microsoft:epoch' + + def __init__(self, client, source, prefetch=300, epoch=None): + """ + Instantiate a receiver. + + :param client: The parent EventHubClient. + :type client: ~azure.eventhub.EventHubClient + :param source: The source EventHub from which to receive events. + :type source: ~uamqp.address.Source + :param prefetch: The number of events to prefetch from the service + for processing. Default is 300. + :type prefetch: int + :param epoch: An optional epoch value. 
+ :type epoch: int + """ + self.offset = None + self.prefetch = prefetch + self.epoch = epoch + properties = None + if epoch: + properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + self._handler = ReceiveClient( + source, + auth=client.auth, + debug=client.debug, + prefetch=self.prefetch, + link_properties=properties, + timeout=self.timeout) + + @property + def queue_size(self): + """ + The current size of the unprocessed message queue. + + :rtype: int + """ + # pylint: disable=protected-access + if self._handler._received_messages: + return self._handler._received_messages.qsize() + return 0 + + def receive(self, max_batch_size=None, timeout=None): + """ + Receive events from the EventHub. + + :param max_batch_size: Receive a batch of events. Batch size will + be up to the maximum specified, but will return as soon as service + returns no new events. If combined with a timeout and no events are + retrieve before the time, the result will be empty. If no batch + size is supplied, the prefetch size will be the maximum. 
+ :type max_batch_size: int + :rtype: list[~azure.eventhub.EventData] + """ + try: + timeout_ms = 1000 * timeout if timeout else 0 + message_batch = self._handler.receive_message_batch( + max_batch_size=max_batch_size, + timeout=timeout_ms) + data_batch = [] + for message in message_batch: + event_data = EventData(message=message) + self.offset = event_data.offset + data_batch.append(event_data) + return data_batch + except errors.AMQPConnectionError as e: + message = "Failed to open receiver: {}".format(e) + message += "\nPlease check that the partition key is valid " + if self.epoch: + message += ("and that a higher epoch receiver is not " + "already running for this partition.") + else: + message += ("and whether an epoch receiver is " + "already running for this partition.") + raise EventHubError(message) + except Exception as e: + raise EventHubError("Receive failed: {}".format(e)) + + def selector(self, default): + """ + Create a selector for the current offset if it is set. + + :param default: The fallback receive offset. + :type default: ~azure.eventhub.Offset + :rtype: ~azure.eventhub.Offset + """ + if self.offset is not None: + return Offset(self.offset).selector() + return default diff --git a/azure/eventhub/sender.py b/azure/eventhub/sender.py new file mode 100644 index 0000000..78c2195 --- /dev/null +++ b/azure/eventhub/sender.py @@ -0,0 +1,99 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +from uamqp import constants +from uamqp import SendClient + +from azure.eventhub.common import EventHubError + + +class Sender: + """ + Implements a Sender. 
+ """ + TIMEOUT = 60.0 + + def __init__(self, client, target, partition=None): + """ + Instantiate an EventHub event Sender client. + + :param client: The parent EventHubClient. + :type client: ~azure.eventhub.EventHubClient. + :param target: The URI of the EventHub to send to. + :type target: str + """ + self.partition = partition + if partition: + target += "/Partitions/" + partition + self._handler = SendClient( + target, + auth=client.auth, + debug=client.debug, + msg_timeout=Sender.TIMEOUT) + self._outcome = None + self._condition = None + + def send(self, event_data): + """ + Sends an event data and blocks until acknowledgement is + received or operation times out. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.client.EventData + :raises: ~azure.eventhub.client.EventHubError if the message fails to + send. + :return: The outcome of the message send. + :rtype: ~uamqp.constants.MessageSendResult + """ + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + event_data.message.on_send_complete = self._on_outcome + try: + self._handler.send_message(event_data.message) + if self._outcome != constants.MessageSendResult.Ok: + raise Sender._error(self._outcome, self._condition) + except Exception as e: + raise EventHubError("Send failed: {}".format(e)) + else: + return self._outcome + + def transfer(self, event_data, callback=None): + """ + Transfers an event data and notifies the callback when the operation is done. + + :param event_data: The event to be sent. + :type event_data: ~azure.eventhub.client.EventData + :param callback: Callback to be run once the message has been send. + This must be a function that accepts two arguments. 
+ :type callback: func[~uamqp.constants.MessageSendResult, ~azure.eventhub.client.EventHubError] + """ + if event_data.partition_key and self.partition: + raise ValueError("EventData partition key cannot be used with a partition sender.") + if callback: + event_data.message.on_send_complete = lambda o, c: callback(o, Sender._error(o, c)) + self._handler.queue_message(event_data.message) + + def wait(self): + """ + Wait until all transferred events have been sent. + """ + try: + self._handler.wait() + except Exception as e: + raise EventHubError("Send failed: {}".format(e)) + + def _on_outcome(self, outcome, condition): + """ + Called when the outcome is received for a delivery. + + :param outcome: The outcome of the message delivery - success or failure. + :type outcome: ~uamqp.constants.MessageSendResult + """ + self._outcome = outcome + self._condition = condition + + @staticmethod + def _error(outcome, condition): + return None if outcome == constants.MessageSendResult.Ok else EventHubError(outcome, condition) diff --git a/azure/eventprocessorhost/eh_partition_pump.py b/azure/eventprocessorhost/eh_partition_pump.py index 738422d..1c1b813 100644 --- a/azure/eventprocessorhost/eh_partition_pump.py +++ b/azure/eventprocessorhost/eh_partition_pump.py @@ -5,8 +5,7 @@ import logging import asyncio -from azure.eventhub import Offset -from azure.eventhub.async import EventHubClientAsync +from azure.eventhub import Offset, EventHubClientAsync from azure.eventprocessorhost.partition_pump import PartitionPump diff --git a/azure/eventprocessorhost/partition_manager.py b/azure/eventprocessorhost/partition_manager.py index 2580abc..2ae402e 100644 --- a/azure/eventprocessorhost/partition_manager.py +++ b/azure/eventprocessorhost/partition_manager.py @@ -8,7 +8,7 @@ from queue import Queue from collections import Counter -from azure.eventhub.async import EventHubClientAsync +from azure.eventhub import EventHubClientAsync from azure.eventprocessorhost.eh_partition_pump import 
EventHubPartitionPump from azure.eventprocessorhost.cancellation_token import CancellationToken diff --git a/examples/recv_async.py b/examples/recv_async.py index 13b4c76..d025bc9 100644 --- a/examples/recv_async.py +++ b/examples/recv_async.py @@ -14,8 +14,7 @@ import time import logging import asyncio -from azure.eventhub import Offset -from azure.eventhub.async import EventHubClientAsync, AsyncReceiver +from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver import examples logger = examples.get_logger(logging.INFO) diff --git a/examples/recv_epoch.py b/examples/recv_epoch.py index cd28751..f9f291e 100644 --- a/examples/recv_epoch.py +++ b/examples/recv_epoch.py @@ -14,8 +14,7 @@ import time import logging import asyncio -from azure.eventhub import Offset -from azure.eventhub.async import EventHubClientAsync, AsyncReceiver +from azure.eventhub import Offset, EventHubClientAsync, AsyncReceiver import examples logger = examples.get_logger(logging.INFO) diff --git a/examples/send_async.py b/examples/send_async.py index fa5e5d1..96a6ce1 100644 --- a/examples/send_async.py +++ b/examples/send_async.py @@ -12,8 +12,7 @@ import asyncio import os -from azure.eventhub import EventData -from azure.eventhub.async import EventHubClientAsync, AsyncSender +from azure.eventhub import EventData, EventHubClientAsync, AsyncSender import examples logger = examples.get_logger(logging.INFO) diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 3480374..0000000 --- a/setup.cfg +++ /dev/null @@ -1,2 +0,0 @@ -[bdist_wheel] -universal=1 \ No newline at end of file diff --git a/tests/test_negative.py b/tests/test_negative.py index 1acb8a3..2eaff40 100644 --- a/tests/test_negative.py +++ b/tests/test_negative.py @@ -9,8 +9,12 @@ import pytest from azure import eventhub -from azure.eventhub import EventData, Offset, EventHubError, EventHubClient -from azure.eventhub.async import EventHubClientAsync +from azure.eventhub import ( + EventHubClientAsync, + EventData, 
+ Offset, + EventHubError, + EventHubClient) def test_send_partition_key_with_partition(connection_str): diff --git a/tests/test_receive_async.py b/tests/test_receive_async.py index b8347f6..68a87ef 100644 --- a/tests/test_receive_async.py +++ b/tests/test_receive_async.py @@ -10,8 +10,7 @@ import time from azure import eventhub -from azure.eventhub import EventData, Offset, EventHubError -from azure.eventhub.async import EventHubClientAsync +from azure.eventhub import EventData, Offset, EventHubError, EventHubClientAsync @pytest.mark.asyncio @@ -192,6 +191,7 @@ async def pump(receiver, sleep=None): @pytest.mark.asyncio async def test_epoch_receiver_async(connection_str, senders): + pytest.skip("") client = EventHubClientAsync.from_connection_string(connection_str, debug=False) receivers = [] for epoch in [10, 20]: @@ -237,6 +237,7 @@ async def test_multiple_receiver_async(connection_str, senders): @pytest.mark.asyncio async def test_epoch_receiver_after_non_epoch_receiver_async(connection_str, senders): + pytest.skip("") client = EventHubClientAsync.from_connection_string(connection_str, debug=False) receivers = [] receivers.append(client.add_async_receiver("$default", "0", prefetch=1000)) diff --git a/tests/test_send_async.py b/tests/test_send_async.py index af20fc7..ede8b73 100644 --- a/tests/test_send_async.py +++ b/tests/test_send_async.py @@ -9,9 +9,7 @@ import pytest import time -from azure import eventhub -from azure.eventhub import EventData -from azure.eventhub.async import EventHubClientAsync +from azure.eventhub import EventData, EventHubClientAsync @pytest.mark.asyncio From 9877eddd4f5df6ea19467207d714aaf254a8fa14 Mon Sep 17 00:00:00 2001 From: annatisch Date: Wed, 4 Jul 2018 10:52:49 -0700 Subject: [PATCH 08/11] IoThub support --- azure/eventhub/_async/__init__.py | 89 +++++++-- azure/eventhub/_async/receiver_async.py | 72 +++++-- azure/eventhub/_async/sender_async.py | 60 +++++- azure/eventhub/common.py | 13 +- azure/eventhub/receiver.py | 82 
++++++-- azure/eventhub/sender.py | 72 ++++++- .../abstract_event_processor.py | 2 +- azure/eventprocessorhost/eh_partition_pump.py | 2 +- azure/eventprocessorhost/partition_context.py | 4 +- azure/eventprocessorhost/partition_pump.py | 2 +- conftest.py | 45 ++++- examples/eph.py | 2 +- tests/__init__.py | 2 +- tests/test_iothub_receive.py | 22 +++ tests/test_iothub_receive_async.py | 66 +++++++ tests/test_iothub_send.py | 28 +++ tests/test_negative.py | 181 +++++++++++------- tests/test_receive_async.py | 42 ++-- 18 files changed, 612 insertions(+), 174 deletions(-) create mode 100644 tests/test_iothub_receive.py create mode 100644 tests/test_iothub_receive_async.py create mode 100644 tests/test_iothub_send.py diff --git a/azure/eventhub/_async/__init__.py b/azure/eventhub/_async/__init__.py index 5831051..e80d78a 100644 --- a/azure/eventhub/_async/__init__.py +++ b/azure/eventhub/_async/__init__.py @@ -8,8 +8,7 @@ import time import datetime -from uamqp import constants, types, errors -from uamqp.authentication import SASTokenAsync +from uamqp import authentication, constants, types, errors from uamqp import ( Message, Source, @@ -50,7 +49,9 @@ def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self :param password: The shared access key. :type password: str """ - return SASTokenAsync.from_shared_access_key(auth_uri, username, password) + if "@sas.root" in username: + return authentication.SASLPlain(self.address.hostname, username, password) + return authentication.SASTokenAsync.from_shared_access_key(auth_uri, username, password) def _create_connection_async(self): """ @@ -79,21 +80,71 @@ async def _close_clients_async(self): """ Close all open AsyncSender/AsyncReceiver clients. 
""" - for client in self.clients: - await client.close_async() + await asyncio.gather(*[c.close_async() for c in self.clients]) + + async def _wait_for_client(self, client): + try: + while client.get_handler_state().value == 2: + await self.connection.work_async() + except Exception as exp: + await client.close_async(exception=exp) + + async def _start_client_async(self, client): + try: + await client.open_async(self.connection) + started = await client.has_started() + while not started: + await self.connection.work_async() + started = await client.has_started() + except Exception as exp: + await client.close_async(exception=exp) + + async def _handle_redirect(self, redirects): + if len(redirects) != len(self.clients): + not_redirected = [c for c in self.clients if not c.redirected] + done, timeout = await asyncio.wait([self._wait_for_client(c) for c in not_redirected], timeout=5) + if timeout: + raise EventHubError("Some clients are attempting to redirect the connection.") + redirects = [c.redirected for c in self.clients if c.redirected] + if not all(r.hostname == redirects[0].hostname for r in redirects): + raise EventHubError("Multiple clients attempting to redirect to different hosts.") + self.auth = self._create_auth(redirects[0].address.decode('utf-8'), **self._auth_config) + await self.connection.redirect_async(redirects[0], self.auth) + await asyncio.gather(*[c.open_async(self.connection) for c in self.clients]) async def run_async(self): """ Run the EventHubClient asynchronously. Opens the connection and starts running all AsyncSender/AsyncReceiver clients. + Returns a list of the start up results. For a succcesful client start the + result will be `None`, otherwise the exception raise. + If all clients failed to start, the run will fail, shut down the connection + and raise an exception. + If at least one client starts up successfully the run command will succeed. 
- :rtype: ~azure.eventhub._async.EventHubClientAsync + :rtype: list[~azure.eventhub.common.EventHubError] """ log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) self._create_connection_async() - for client in self.clients: - await client.open_async(connection=self.connection) - return self + tasks = [self._start_client_async(c) for c in self.clients] + try: + await asyncio.gather(*tasks) + redirects = [c.redirected for c in self.clients if c.redirected] + failed = [c.error for c in self.clients if c.error] + if failed and len(failed) == len(self.clients): + log.warning("{}: All clients failed to start.".format(self.container_id, len(failed))) + raise failed[0] + elif failed: + log.warning("{}: {} clients failed to start.".format(self.container_id, len(failed))) + elif redirects: + await self._handle_redirect(redirects) + except EventHubError: + await self.stop_async() + raise + except Exception as exp: + await self.stop_async() + raise EventHubError(str(exp)) + return failed async def stop_async(self): """ @@ -130,7 +181,7 @@ async def get_eventhub_info_async(self): output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']] return output - def add_async_receiver(self, consumer_group, partition, offset=None, prefetch=300, loop=None): + def add_async_receiver(self, consumer_group, partition, offset=None, prefetch=300, operation=None, loop=None): """ Add an async receiver to the client for a particular consumer group and partition. 
@@ -144,16 +195,17 @@ def add_async_receiver(self, consumer_group, partition, offset=None, prefetch=30 :type prefetch: int :rtype: ~azure.eventhub._async.receiver_async.ReceiverAsync """ + path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, self.address.path, consumer_group, partition) + self.address.hostname, path, consumer_group, partition) source = Source(source_url) if offset is not None: source.set_filter(offset.selector()) handler = AsyncReceiver(self, source, prefetch=prefetch, loop=loop) - self.clients.append(handler._handler) # pylint: disable=protected-access + self.clients.append(handler) return handler - def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300, loop=None): + def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300, operation=None, loop=None): """ Add an async receiver to the client with an epoch value. 
Only a single epoch receiver can connect to a partition at any given time - additional epoch receivers must have @@ -170,13 +222,14 @@ def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=30 :type prefetch: int :rtype: ~azure.eventhub._async.receiver_async.ReceiverAsync """ + path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, self.address.path, consumer_group, partition) + self.address.hostname, path, consumer_group, partition) handler = AsyncReceiver(self, source_url, prefetch=prefetch, epoch=epoch, loop=loop) - self.clients.append(handler._handler) # pylint: disable=protected-access + self.clients.append(handler) return handler - def add_async_sender(self, partition=None, loop=None): + def add_async_sender(self, partition=None, operation=None, loop=None): """ Add an async sender to the client to send ~azure.eventhub.common.EventData object to an EventHub. @@ -188,6 +241,8 @@ def add_async_sender(self, partition=None, loop=None): :rtype: ~azure.eventhub._async.sender_async.SenderAsync """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) + if operation: + target = target + operation handler = AsyncSender(self, target, partition=partition, loop=loop) - self.clients.append(handler._handler) # pylint: disable=protected-access + self.clients.append(handler) return handler diff --git a/azure/eventhub/_async/receiver_async.py b/azure/eventhub/_async/receiver_async.py index 3c044d4..3506fdf 100644 --- a/azure/eventhub/_async/receiver_async.py +++ b/azure/eventhub/_async/receiver_async.py @@ -33,21 +33,65 @@ def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pyli :param loop: An event loop. 
""" self.loop = loop or asyncio.get_event_loop() + self.redirected = None + self.error = None + self.debug = client.debug self.offset = None self.prefetch = prefetch + self.properties = None self.epoch = epoch properties = None if epoch: - properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} self._handler = ReceiveClientAsync( source, auth=client.auth, - debug=client.debug, + debug=self.debug, prefetch=self.prefetch, - link_properties=properties, + link_properties=self.properties, timeout=self.timeout, loop=self.loop) + async def open_async(self, connection): + if self.redirected: + self._handler = ReceiveClientAsync( + self.redirected.address, + auth=None, + debug=self.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout, + loop=self.loop) + await self._handler.open_async(connection=connection) + + async def has_started(self): + # pylint: disable=protected-access + timeout = False + auth_in_progress = False + if self._handler._connection.cbs: + timeout, auth_in_progress = await self._handler._auth.handle_token_async() + if timeout: + raise EventHubError("Authorization timeout.") + elif auth_in_progress: + return False + elif not await self._handler._client_ready(): + return False + else: + return True + + async def close_async(self, exception=None): + if self.error: + return + elif isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This receive client is now closed.") + await self._handler.close_async() + async def receive(self, max_batch_size=None, timeout=None): """ Receive events asynchronously from the EventHub. 
@@ -58,8 +102,10 @@ async def receive(self, max_batch_size=None, timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. :type max_batch_size: int - :rtype: list[~azure.eventhub.EventData] + :rtype: list[~azure.eventhub.common.EventData] """ + if self.error: + raise self.error try: timeout_ms = 1000 * timeout if timeout else 0 message_batch = await self._handler.receive_message_batch_async( @@ -71,15 +117,11 @@ async def receive(self, max_batch_size=None, timeout=None): self.offset = event_data.offset data_batch.append(event_data) return data_batch - except errors.AMQPConnectionError as e: - message = "Failed to open receiver: {}".format(e) - message += "\nPlease check that the partition key is valid " - if self.epoch: - message += "and that a higher epoch receiver is " \ - "not already running for this partition." - else: - message += "and whether an epoch receiver is " \ - "already running for this partition." - raise EventHubError(message) + except errors.LinkDetach as detach: + error = EventHubError(str(detach)) + await self.close_async(exception=error) + raise error except Exception as e: - raise EventHubError("Receive failed: {}".format(e)) + error = EventHubError("Receive failed: {}".format(e)) + await self.close_async(exception=error) + raise error diff --git a/azure/eventhub/_async/sender_async.py b/azure/eventhub/_async/sender_async.py index b702162..27bce63 100644 --- a/azure/eventhub/_async/sender_async.py +++ b/azure/eventhub/_async/sender_async.py @@ -5,7 +5,7 @@ import asyncio -from uamqp import constants +from uamqp import constants, errors from uamqp import SendClientAsync from azure.eventhub import EventHubError @@ -26,6 +26,9 @@ def __init__(self, client, target, partition=None, loop=None): # pylint: disabl :type target: str :param loop: An event loop. 
""" + self.redirected = None + self.error = None + self.debug = client.debug self.partition = partition if partition: target += "/Partitions/" + partition @@ -33,22 +36,61 @@ def __init__(self, client, target, partition=None, loop=None): # pylint: disabl self._handler = SendClientAsync( target, auth=client.auth, - debug=client.debug, + debug=self.debug, msg_timeout=Sender.TIMEOUT, loop=self.loop) self._outcome = None self._condition = None + async def open_async(self, connection): + if self.redirected: + self._handler = SendClientAsync( + self.redirected.address, + auth=None, + debug=self.debug, + msg_timeout=Sender.TIMEOUT) + await self._handler.open_async(connection=connection) + + async def has_started(self): + # pylint: disable=protected-access + timeout = False + auth_in_progress = False + if self._handler._connection.cbs: + timeout, auth_in_progress = await self._handler._auth.handle_token_async() + if timeout: + raise EventHubError("Authorization timeout.") + elif auth_in_progress: + return False + elif not await self._handler._client_ready(): + return False + else: + return True + + async def close_async(self, exception=None): + if self.error: + return + elif isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This send client is now closed.") + await self._handler.close_async() + async def send(self, event_data): """ Sends an event data and asynchronously waits until acknowledgement is received or operation times out. :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.EventData - :raises: ~azure.eventhub.EventHubError if the message fails to + :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to send. 
""" + if self.error: + raise self.error if event_data.partition_key and self.partition: raise ValueError("EventData partition key cannot be used with a partition sender.") event_data.message.on_send_complete = self._on_outcome @@ -56,5 +98,13 @@ async def send(self, event_data): await self._handler.send_message_async(event_data.message) if self._outcome != constants.MessageSendResult.Ok: raise Sender._error(self._outcome, self._condition) + except errors.LinkDetach as detach: + error = EventHubError(str(detach)) + await self.close_async(exception=error) + raise error except Exception as e: - raise EventHubError("Send failed: {}".format(e)) + error = EventHubError("Send failed: {}".format(e)) + await self.close_async(exception=error) + raise error + else: + return self._outcome diff --git a/azure/eventhub/common.py b/azure/eventhub/common.py index f0a7f92..f528bba 100644 --- a/azure/eventhub/common.py +++ b/azure/eventhub/common.py @@ -8,7 +8,7 @@ from uamqp import Message, BatchMessage from uamqp import types -from uamqp.message import MessageHeader +from uamqp.message import MessageHeader, MessageProperties class EventData(object): @@ -23,7 +23,7 @@ class EventData(object): PROP_TIMESTAMP = b"x-opt-enqueued-time" PROP_DEVICE_ID = b"iothub-connection-device-id" - def __init__(self, body=None, batch=None, message=None): + def __init__(self, body=None, batch=None, to_device=None, message=None): """ Initialize EventData. 
@@ -37,21 +37,24 @@ def __init__(self, body=None, batch=None, message=None): self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) self._annotations = {} self._properties = {} + self._msg_properties = MessageProperties() + if to_device: + self._msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) if batch: - self.message = BatchMessage(data=batch, multi_messages=True) + self.message = BatchMessage(data=batch, multi_messages=True, properties=self._msg_properties) elif message: self.message = message self._annotations = message.annotations self._properties = message.application_properties else: if isinstance(body, list) and body: - self.message = Message(body[0]) + self.message = Message(body[0], properties=self._msg_properties) for more in body[1:]: self.message._body.append(more) # pylint: disable=protected-access elif body is None: raise ValueError("EventData cannot be None.") else: - self.message = Message(body) + self.message = Message(body, properties=self._msg_properties) @property diff --git a/azure/eventhub/receiver.py b/azure/eventhub/receiver.py index c332bca..beec103 100644 --- a/azure/eventhub/receiver.py +++ b/azure/eventhub/receiver.py @@ -21,7 +21,7 @@ def __init__(self, client, source, prefetch=300, epoch=None): Instantiate a receiver. :param client: The parent EventHubClient. - :type client: ~azure.eventhub.EventHubClient + :type client: ~azure.eventhub.client.EventHubClient :param source: The source EventHub from which to receive events. 
:type source: ~uamqp.address.Source :param prefetch: The number of events to prefetch from the service @@ -33,17 +33,63 @@ def __init__(self, client, source, prefetch=300, epoch=None): self.offset = None self.prefetch = prefetch self.epoch = epoch - properties = None + self.properties = None + self.redirected = None + self.debug = client.debug + self.error = None if epoch: - properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} + self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} self._handler = ReceiveClient( source, auth=client.auth, - debug=client.debug, + debug=self.debug, prefetch=self.prefetch, - link_properties=properties, + link_properties=self.properties, timeout=self.timeout) + def open(self, connection): + if self.redirected: + self._handler = ReceiveClient( + self.redirected.address, + auth=None, + debug=self.debug, + prefetch=self.prefetch, + link_properties=self.properties, + timeout=self.timeout) + self._handler.open(connection) + + def get_handler_state(self): + # pylint: disable=protected-access + return self._handler._message_receiver.get_state() + + def has_started(self): + # pylint: disable=protected-access + timeout = False + auth_in_progress = False + if self._handler._connection.cbs: + timeout, auth_in_progress = self._handler._auth.handle_token() + if timeout: + raise EventHubError("Authorization timeout.") + elif auth_in_progress: + return False + elif not self._handler._client_ready(): + return False + else: + return True + + def close(self, exception=None): + if self.error: + return + elif isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This receive client is now closed.") + self._handler.close() + @property def queue_size(self): """ @@ -66,8 +112,10 @@ def receive(self, max_batch_size=None, 
timeout=None): retrieve before the time, the result will be empty. If no batch size is supplied, the prefetch size will be the maximum. :type max_batch_size: int - :rtype: list[~azure.eventhub.EventData] + :rtype: list[~azure.eventhub.common.EventData] """ + if self.error: + raise self.error try: timeout_ms = 1000 * timeout if timeout else 0 message_batch = self._handler.receive_message_batch( @@ -79,26 +127,22 @@ def receive(self, max_batch_size=None, timeout=None): self.offset = event_data.offset data_batch.append(event_data) return data_batch - except errors.AMQPConnectionError as e: - message = "Failed to open receiver: {}".format(e) - message += "\nPlease check that the partition key is valid " - if self.epoch: - message += ("and that a higher epoch receiver is not " - "already running for this partition.") - else: - message += ("and whether an epoch receiver is " - "already running for this partition.") - raise EventHubError(message) + except errors.LinkDetach as detach: + error = EventHubError(str(detach)) + self.close(exception=error) + raise error except Exception as e: - raise EventHubError("Receive failed: {}".format(e)) + error = EventHubError("Receive failed: {}".format(e)) + self.close(exception=error) + raise error def selector(self, default): """ Create a selector for the current offset if it is set. :param default: The fallback receive offset. - :type default: ~azure.eventhub.Offset - :rtype: ~azure.eventhub.Offset + :type default: ~azure.eventhub.common.Offset + :rtype: ~azure.eventhub.common.Offset """ if self.offset is not None: return Offset(self.offset).selector() diff --git a/azure/eventhub/sender.py b/azure/eventhub/sender.py index 78c2195..0a0e08c 100644 --- a/azure/eventhub/sender.py +++ b/azure/eventhub/sender.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -from uamqp import constants +from uamqp import constants, errors from uamqp import SendClient from azure.eventhub.common import EventHubError @@ -20,33 +20,79 @@ def __init__(self, client, target, partition=None): Instantiate an EventHub event Sender client. :param client: The parent EventHubClient. - :type client: ~azure.eventhub.EventHubClient. + :type client: ~azure.eventhub.client.EventHubClient. :param target: The URI of the EventHub to send to. :type target: str """ + self.redirected = None + self.error = None + self.debug = client.debug self.partition = partition if partition: target += "/Partitions/" + partition self._handler = SendClient( target, auth=client.auth, - debug=client.debug, + debug=self.debug, msg_timeout=Sender.TIMEOUT) self._outcome = None self._condition = None + def open(self, connection): + if self.redirected: + self._handler = SendClient( + self.redirected.address, + auth=None, + debug=self.debug, + msg_timeout=Sender.TIMEOUT) + self._handler.open(connection) + + def get_handler_state(self): + # pylint: disable=protected-access + return self._handler._message_sender.get_state() + + def has_started(self): + # pylint: disable=protected-access + timeout = False + auth_in_progress = False + if self._handler._connection.cbs: + timeout, auth_in_progress = self._handler._auth.handle_token() + if timeout: + raise EventHubError("Authorization timeout.") + elif auth_in_progress: + return False + elif not self._handler._client_ready(): + return False + else: + return True + + def close(self, exception=None): + if self.error: + return + elif isinstance(exception, errors.LinkRedirect): + self.redirected = exception + elif isinstance(exception, EventHubError): + self.error = exception + elif exception: + self.error = EventHubError(str(exception)) + else: + self.error = EventHubError("This send client is now closed.") + self._handler.close() + def send(self, 
event_data): """ Sends an event data and blocks until acknowledgement is received or operation times out. :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.client.EventData - :raises: ~azure.eventhub.client.EventHubError if the message fails to + :type event_data: ~azure.eventhub.common.EventData + :raises: ~azure.eventhub.common.EventHubError if the message fails to send. :return: The outcome of the message send. :rtype: ~uamqp.constants.MessageSendResult """ + if self.error: + raise self.error if event_data.partition_key and self.partition: raise ValueError("EventData partition key cannot be used with a partition sender.") event_data.message.on_send_complete = self._on_outcome @@ -54,8 +100,14 @@ def send(self, event_data): self._handler.send_message(event_data.message) if self._outcome != constants.MessageSendResult.Ok: raise Sender._error(self._outcome, self._condition) + except errors.LinkDetach as detach: + error = EventHubError(str(detach)) + self.close(exception=error) + raise error except Exception as e: - raise EventHubError("Send failed: {}".format(e)) + error = EventHubError("Send failed: {}".format(e)) + self.close(exception=error) + raise error else: return self._outcome @@ -64,11 +116,13 @@ def transfer(self, event_data, callback=None): Transfers an event data and notifies the callback when the operation is done. :param event_data: The event to be sent. - :type event_data: ~azure.eventhub.client.EventData + :type event_data: ~azure.eventhub.common.EventData :param callback: Callback to be run once the message has been send. This must be a function that accepts two arguments. 
- :type callback: func[~uamqp.constants.MessageSendResult, ~azure.eventhub.client.EventHubError] + :type callback: func[~uamqp.constants.MessageSendResult, ~azure.eventhub.common.EventHubError] """ + if self.error: + raise self.error if event_data.partition_key and self.partition: raise ValueError("EventData partition key cannot be used with a partition sender.") if callback: @@ -79,6 +133,8 @@ def wait(self): """ Wait until all transferred events have been sent. """ + if self.error: + raise self.error try: self._handler.wait() except Exception as e: diff --git a/azure/eventprocessorhost/abstract_event_processor.py b/azure/eventprocessorhost/abstract_event_processor.py index 6ff9dd4..12302b5 100644 --- a/azure/eventprocessorhost/abstract_event_processor.py +++ b/azure/eventprocessorhost/abstract_event_processor.py @@ -43,7 +43,7 @@ async def process_events_async(self, context, messages): :param context: Information about the partition :type context: ~azure.eventprocessorhost.partition_context.PartitionContext :param messages: The events to be processed. - :type messages: list[~azure.eventhub.EventData] + :type messages: list[~azure.eventhub.common.EventData] """ pass diff --git a/azure/eventprocessorhost/eh_partition_pump.py b/azure/eventprocessorhost/eh_partition_pump.py index 1c1b813..86c42d2 100644 --- a/azure/eventprocessorhost/eh_partition_pump.py +++ b/azure/eventprocessorhost/eh_partition_pump.py @@ -141,7 +141,7 @@ async def process_events_async(self, events): by running faster than OnEvents. :param events: List of events to be processed. 
- :type events: list of ~azure.eventhub.EventData + :type events: list of ~azure.eventhub.common.EventData """ await self.eh_partition_pump.process_events_async(events) diff --git a/azure/eventprocessorhost/partition_context.py b/azure/eventprocessorhost/partition_context.py index fb619b3..9eaf53f 100644 --- a/azure/eventprocessorhost/partition_context.py +++ b/azure/eventprocessorhost/partition_context.py @@ -30,7 +30,7 @@ def set_offset_and_sequence_number(self, event_data): Updates offset based on event. :param event_data: A received EventData with valid offset and sequenceNumber. - :type event_data: ~azure.eventhub.EventData + :type event_data: ~azure.eventhub.common.EventData """ if not event_data: raise Exception(event_data) @@ -73,7 +73,7 @@ async def checkpoint_async_event_data(self, event_data): then writes those values to the checkpoint store via the checkpoint manager. :param event_data: A received EventData with valid offset and sequenceNumber. - :type event_data: ~azure.eventhub.EventData + :type event_data: ~azure.eventhub.common.EventData :raises: ValueError if suplied event_data is None. :raises: ValueError if the sequenceNumber is less than the last checkpointed value. """ diff --git a/azure/eventprocessorhost/partition_pump.py b/azure/eventprocessorhost/partition_pump.py index 26e62b0..769e2ce 100644 --- a/azure/eventprocessorhost/partition_pump.py +++ b/azure/eventprocessorhost/partition_pump.py @@ -136,7 +136,7 @@ async def process_events_async(self, events): Process pump events. :param events: List of events to be processed. - :type events: list[~azure.eventhub.EventData] + :type events: list[~azure.eventhub.common.EventData] """ if events: # Synchronize to serialize calls to the processor. 
The handler is not installed until diff --git a/conftest.py b/conftest.py index 1673946..6932e27 100644 --- a/conftest.py +++ b/conftest.py @@ -23,7 +23,7 @@ from azure.eventprocessorhost.abstract_event_processor import AbstractEventProcessor -log = get_logger(None, logging.INFO) +log = get_logger(None, logging.DEBUG) @pytest.fixture() def live_eventhub_config(): @@ -49,6 +49,49 @@ def connection_str(): pytest.skip("No EventHub connection string found.") +@pytest.fixture() +def invalid_hostname(): + try: + conn_str = os.environ['EVENT_HUB_CONNECTION_STR'] + return conn_str.replace("Endpoint=sb://", "Endpoint=sb://invalid.") + except KeyError: + pytest.skip("No EventHub connection string found.") + + +@pytest.fixture() +def invalid_key(): + try: + conn_str = os.environ['EVENT_HUB_CONNECTION_STR'] + return conn_str.replace("SharedAccessKey=", "SharedAccessKey=invalid") + except KeyError: + pytest.skip("No EventHub connection string found.") + + +@pytest.fixture() +def invalid_policy(): + try: + conn_str = os.environ['EVENT_HUB_CONNECTION_STR'] + return conn_str.replace("SharedAccessKeyName=", "SharedAccessKeyName=invalid") + except KeyError: + pytest.skip("No EventHub connection string found.") + + +@pytest.fixture() +def iot_connection_str(): + try: + return os.environ['IOT_HUB_CONNECTION_STR'] + except KeyError: + pytest.skip("No IotHub connection string found.") + + +@pytest.fixture() +def device_id(): + try: + return os.environ['IOTHUB_DEVICE'] + except KeyError: + pytest.skip("No Iothub device ID found.") + + @pytest.fixture() def receivers(connection_str): client = EventHubClient.from_connection_string(connection_str, debug=True) diff --git a/examples/eph.py b/examples/eph.py index 9ad16e7..7a9633c 100644 --- a/examples/eph.py +++ b/examples/eph.py @@ -58,7 +58,7 @@ async def process_events_async(self, context, messages): :param context: Information about the partition :type context: ~azure.eventprocessorhost.PartitionContext :param messages: The events to be 
processed. - :type messages: list[~azure.eventhub.EventData] + :type messages: list[~azure.eventhub.common.EventData] """ logger.info("Events processed {}".format(context.sequence_number)) await context.checkpoint_async() diff --git a/tests/__init__.py b/tests/__init__.py index 7b7c91a..7ec7d3b 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,7 +12,7 @@ def get_logger(filename, level=logging.INFO): azure_logger = logging.getLogger("azure") azure_logger.setLevel(level) uamqp_logger = logging.getLogger("uamqp") - uamqp_logger.setLevel(logging.INFO) + uamqp_logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') console_handler = logging.StreamHandler(stream=sys.stdout) diff --git a/tests/test_iothub_receive.py b/tests/test_iothub_receive.py new file mode 100644 index 0000000..78c1de8 --- /dev/null +++ b/tests/test_iothub_receive.py @@ -0,0 +1,22 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import os +import pytest +import time + +from azure import eventhub +from azure.eventhub import EventData, EventHubClient, Offset + +def test_iothub_receive(iot_connection_str, device_id): + client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) + receiver = client.add_receiver("$default", "0", operation='/messages/events') + try: + client.run() + received = receiver.receive(timeout=5) + assert len(received) == 0 + finally: + client.stop() \ No newline at end of file diff --git a/tests/test_iothub_receive_async.py b/tests/test_iothub_receive_async.py new file mode 100644 index 0000000..a7126d3 --- /dev/null +++ b/tests/test_iothub_receive_async.py @@ -0,0 +1,66 @@ +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- + +import os +import asyncio +import pytest +import time + +from azure import eventhub +from azure.eventhub import EventData, Offset, EventHubError, EventHubClientAsync + + +async def pump(receiver, sleep=None): + messages = 0 + if sleep: + await asyncio.sleep(sleep) + batch = await receiver.receive(timeout=1) + while batch: + messages += len(batch) + batch = await receiver.receive(timeout=1) + return messages + + +@pytest.mark.asyncio +async def test_iothub_receive_async(iot_connection_str): + client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True) + receivers = [] + for i in range(2): + receivers.append(client.add_async_receiver("$default", "0", prefetch=1000, operation='/messages/events')) + await client.run_async() + try: + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1]), + return_exceptions=True) + + assert isinstance(outputs[0], int) and outputs[0] == 0 + assert isinstance(outputs[1], int) and outputs[1] == 0 + except: + raise + finally: + await client.stop_async() + + +@pytest.mark.asyncio +async def test_iothub_receive_detach_async(iot_connection_str): + client = EventHubClientAsync.from_iothub_connection_string(iot_connection_str, debug=True) + receivers = [] + for i in range(2): + receivers.append(client.add_async_receiver("$default", str(i), prefetch=1000, operation='/messages/events')) + await client.run_async() + try: + outputs = await asyncio.gather( + pump(receivers[0]), + pump(receivers[1]), + return_exceptions=True) + + assert isinstance(outputs[0], int) and outputs[0] == 0 + assert isinstance(outputs[1], EventHubError) + except: + raise + finally: + await client.stop_async() \ No newline at end of file diff --git a/tests/test_iothub_send.py b/tests/test_iothub_send.py new file mode 100644 index 0000000..7c0dd7c --- /dev/null +++ b/tests/test_iothub_send.py @@ -0,0 +1,28 @@ 
+#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +import os +import pytest +import time +import uuid + +from uamqp.message import MessageProperties + +from azure import eventhub +from azure.eventhub import EventData, EventHubClient + + +def test_iothub_send_single_event(iot_connection_str, device_id): + client = EventHubClient.from_iothub_connection_string(iot_connection_str, debug=True) + sender = client.add_sender(operation='/messages/devicebound') + try: + client.run() + outcome = sender.send(EventData(b"A single event", to_device=device_id)) + assert outcome.value == 0 + except: + raise + finally: + client.stop() diff --git a/tests/test_negative.py b/tests/test_negative.py index 2eaff40..3c8dde6 100644 --- a/tests/test_negative.py +++ b/tests/test_negative.py @@ -17,8 +17,98 @@ EventHubClient) -def test_send_partition_key_with_partition(connection_str): - client = EventHubClient.from_connection_string(connection_str, debug=False) +def test_send_with_invalid_hostname(invalid_hostname, receivers): + client = EventHubClient.from_connection_string(invalid_hostname, debug=False) + sender = client.add_sender() + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.asyncio +async def test_send_with_invalid_hostname_async(invalid_hostname, receivers): + client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True) + sender = client.add_async_sender() + with pytest.raises(EventHubError): + await client.run_async() + + +def test_receive_with_invalid_hostname_sync(invalid_hostname): + client = EventHubClient.from_connection_string(invalid_hostname, debug=True) + receiver = client.add_receiver("$default", "0") + with pytest.raises(EventHubError): + client.run() + + 
+@pytest.mark.asyncio +async def test_receive_with_invalid_hostname_async(invalid_hostname): + client = EventHubClientAsync.from_connection_string(invalid_hostname, debug=True) + sender = client.add_async_receiver("$default", "0") + with pytest.raises(EventHubError): + await client.run_async() + + +def test_send_with_invalid_key(invalid_key, receivers): + client = EventHubClient.from_connection_string(invalid_key, debug=False) + sender = client.add_sender() + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.asyncio +async def test_send_with_invalid_key_async(invalid_key, receivers): + client = EventHubClientAsync.from_connection_string(invalid_key, debug=False) + sender = client.add_async_sender() + with pytest.raises(EventHubError): + await client.run_async() + + +def test_receive_with_invalid_key_sync(invalid_key): + client = EventHubClient.from_connection_string(invalid_key, debug=True) + receiver = client.add_receiver("$default", "0") + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.asyncio +async def test_receive_with_invalid_key_async(invalid_key): + client = EventHubClientAsync.from_connection_string(invalid_key, debug=True) + sender = client.add_async_receiver("$default", "0") + with pytest.raises(EventHubError): + await client.run_async() + + +def test_send_with_invalid_policy(invalid_policy, receivers): + client = EventHubClient.from_connection_string(invalid_policy, debug=False) + sender = client.add_sender() + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.asyncio +async def test_send_with_invalid_policy_async(invalid_policy, receivers): + client = EventHubClientAsync.from_connection_string(invalid_policy, debug=False) + sender = client.add_async_sender() + with pytest.raises(EventHubError): + await client.run_async() + + +def test_receive_with_invalid_policy_sync(invalid_policy): + client = EventHubClient.from_connection_string(invalid_policy, debug=True) + receiver = 
client.add_receiver("$default", "0") + with pytest.raises(EventHubError): + client.run() + + +@pytest.mark.asyncio +async def test_receive_with_invalid_policy_async(invalid_policy): + client = EventHubClientAsync.from_connection_string(invalid_policy, debug=True) + sender = client.add_async_receiver("$default", "0") + with pytest.raises(EventHubError): + await client.run_async() + + +def test_send_partition_key_with_partition_sync(connection_str): + client = EventHubClient.from_connection_string(connection_str, debug=True) sender = client.add_sender(partition="1") try: client.run() @@ -26,15 +116,13 @@ def test_send_partition_key_with_partition(connection_str): data.partition_key = b"PKey" with pytest.raises(ValueError): sender.send(data) - except: - raise finally: client.stop() @pytest.mark.asyncio async def test_send_partition_key_with_partition_async(connection_str): - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) sender = client.add_async_sender(partition="1") try: await client.run_async() @@ -42,8 +130,6 @@ async def test_send_partition_key_with_partition_async(connection_str): data.partition_key = b"PKey" with pytest.raises(ValueError): await sender.send(data) - except: - raise finally: await client.stop_async() @@ -51,70 +137,42 @@ async def test_send_partition_key_with_partition_async(connection_str): def test_non_existing_entity_sender(connection_str): client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False) sender = client.add_sender(partition="1") - try: + with pytest.raises(EventHubError): client.run() - data = EventData(b"Data") - with pytest.raises(EventHubError): - sender.send(data) - except: - raise - finally: - client.stop() @pytest.mark.asyncio async def test_non_existing_entity_sender_async(connection_str): client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", 
debug=False) sender = client.add_async_sender(partition="1") - try: + with pytest.raises(EventHubError): await client.run_async() - data = EventData(b"Data") - with pytest.raises(EventHubError): - await sender.send(data) - except: - raise - finally: - await client.stop_async() def test_non_existing_entity_receiver(connection_str): client = EventHubClient.from_connection_string(connection_str, eventhub="nemo", debug=False) receiver = client.add_receiver("$default", "0") - try: + with pytest.raises(EventHubError): client.run() - with pytest.raises(EventHubError): - receiver.receive(timeout=5) - except: - raise - finally: - client.stop() @pytest.mark.asyncio async def test_non_existing_entity_receiver_async(connection_str): client = EventHubClientAsync.from_connection_string(connection_str, eventhub="nemo", debug=False) receiver = client.add_async_receiver("$default", "0") - try: + with pytest.raises(EventHubError): await client.run_async() - with pytest.raises(EventHubError): - await receiver.receive(timeout=5) - except: - raise - finally: - await client.stop_async() -def test_receive_from_invalid_partitions(connection_str): +def test_receive_from_invalid_partitions_sync(connection_str): partitions = ["XYZ", "-1", "1000", "-" ] for p in partitions: - client = EventHubClient.from_connection_string(connection_str, debug=False) + client = EventHubClient.from_connection_string(connection_str, debug=True) receiver = client.add_receiver("$default", p) try: - client.run() with pytest.raises(EventHubError): - receiver.receive(timeout=5) - except: - raise + client.run() + receiver.receive(timeout=10) finally: client.stop() @@ -126,11 +184,9 @@ async def test_receive_from_invalid_partitions_async(connection_str): client = EventHubClientAsync.from_connection_string(connection_str, debug=False) receiver = client.add_async_receiver("$default", p) try: - await client.run_async() with pytest.raises(EventHubError): - await receiver.receive(timeout=5) - except: - raise + await 
client.run_async() + await receiver.receive(timeout=10) finally: await client.stop_async() @@ -140,13 +196,11 @@ def test_send_to_invalid_partitions(connection_str): for p in partitions: client = EventHubClient.from_connection_string(connection_str, debug=False) sender = client.add_sender(partition=p) + client.run() + data = EventData(b"A" * 300000) try: - client.run() - data = EventData(b"Data") with pytest.raises(EventHubError): sender.send(data) - except: - raise finally: client.stop() @@ -157,19 +211,16 @@ async def test_send_to_invalid_partitions_async(connection_str): for p in partitions: client = EventHubClientAsync.from_connection_string(connection_str, debug=False) sender = client.add_async_sender(partition=p) + await client.run_async() + data = EventData(b"A" * 300000) try: - await client.run_async() - data = EventData(b"Data") with pytest.raises(EventHubError): await sender.send(data) - except: - raise finally: await client.stop_async() def test_send_too_large_message(connection_str): - partitions = ["XYZ", "-1", "1000", "-" ] client = EventHubClient.from_connection_string(connection_str, debug=False) sender = client.add_sender() try: @@ -177,15 +228,12 @@ def test_send_too_large_message(connection_str): data = EventData(b"A" * 300000) with pytest.raises(EventHubError): sender.send(data) - except: - raise finally: client.stop() @pytest.mark.asyncio async def test_send_too_large_message_async(connection_str): - partitions = ["XYZ", "-1", "1000", "-" ] client = EventHubClientAsync.from_connection_string(connection_str, debug=False) sender = client.add_async_sender() try: @@ -193,8 +241,6 @@ async def test_send_too_large_message_async(connection_str): data = EventData(b"A" * 300000) with pytest.raises(EventHubError): await sender.send(data) - except: - raise finally: await client.stop_async() @@ -208,15 +254,12 @@ def test_send_null_body(connection_str): with pytest.raises(ValueError): data = EventData(None) sender.send(data) - except: - raise finally: 
client.stop() @pytest.mark.asyncio async def test_send_null_body_async(connection_str): - partitions = ["XYZ", "-1", "1000", "-" ] client = EventHubClientAsync.from_connection_string(connection_str, debug=False) sender = client.add_async_sender() try: @@ -224,8 +267,6 @@ async def test_send_null_body_async(connection_str): with pytest.raises(ValueError): data = EventData(None) await sender.send(data) - except: - raise finally: await client.stop_async() @@ -241,12 +282,12 @@ async def pump(receiver): @pytest.mark.asyncio async def test_max_receivers_async(connection_str, senders): - client = EventHubClientAsync.from_connection_string(connection_str, debug=False) + client = EventHubClientAsync.from_connection_string(connection_str, debug=True) receivers = [] for i in range(6): receivers.append(client.add_async_receiver("$default", "0", prefetch=1000, offset=Offset('@latest'))) - await client.run_async() try: + await client.run_async() outputs = await asyncio.gather( pump(receivers[0]), pump(receivers[1]), @@ -255,9 +296,7 @@ async def test_max_receivers_async(connection_str, senders): pump(receivers[4]), pump(receivers[5]), return_exceptions=True) + print(outputs) assert len([o for o in outputs if isinstance(o, EventHubError)]) == 1 - - except: - raise finally: await client.stop_async() \ No newline at end of file diff --git a/tests/test_receive_async.py b/tests/test_receive_async.py index 68a87ef..d002674 100644 --- a/tests/test_receive_async.py +++ b/tests/test_receive_async.py @@ -180,10 +180,14 @@ async def test_receive_batch_async(connection_str, senders): async def pump(receiver, sleep=None): messages = 0 + count = 0 if sleep: await asyncio.sleep(sleep) batch = await receiver.receive(timeout=10) while batch: + count += 1 + if count >= 10: + break messages += len(batch) batch = await receiver.receive(timeout=10) return messages @@ -191,21 +195,17 @@ async def pump(receiver, sleep=None): @pytest.mark.asyncio async def test_epoch_receiver_async(connection_str, 
senders): - pytest.skip("") client = EventHubClientAsync.from_connection_string(connection_str, debug=False) receivers = [] for epoch in [10, 20]: - receivers.append(client.add_async_epoch_receiver("$default", "0", epoch, prefetch=1000)) - await client.run_async() + receivers.append(client.add_async_epoch_receiver("$default", "0", epoch, prefetch=5)) try: + await client.run_async() outputs = await asyncio.gather( pump(receivers[0]), pump(receivers[1]), return_exceptions=True) - # Depending on how many messages are present and how long the test - # runs, one receiver may not throw and error - in which case it should - # still not have received any messages. - assert isinstance(outputs[0], EventHubError) or outputs[0] == 0 + assert isinstance(outputs[0], EventHubError) assert outputs[1] >= 1 except: raise @@ -215,18 +215,16 @@ async def test_epoch_receiver_async(connection_str, senders): @pytest.mark.asyncio async def test_multiple_receiver_async(connection_str, senders): - pytest.skip("") client = EventHubClientAsync.from_connection_string(connection_str, debug=True) receivers = [] for i in range(2): - receivers.append(client.add_async_receiver("$default", "0", prefetch=1000)) - await client.run_async() + receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) try: + await client.run_async() outputs = await asyncio.gather( pump(receivers[0]), pump(receivers[1]), return_exceptions=True) - print(outputs) assert isinstance(outputs[0], int) and outputs[0] >= 1 assert isinstance(outputs[1], int) and outputs[1] >= 1 except: @@ -237,23 +235,17 @@ async def test_multiple_receiver_async(connection_str, senders): @pytest.mark.asyncio async def test_epoch_receiver_after_non_epoch_receiver_async(connection_str, senders): - pytest.skip("") client = EventHubClientAsync.from_connection_string(connection_str, debug=False) receivers = [] - receivers.append(client.add_async_receiver("$default", "0", prefetch=1000)) - 
receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=1000)) - - await client.run_async() + receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) + receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=10)) try: + await client.run_async() outputs = await asyncio.gather( pump(receivers[0]), pump(receivers[1], sleep=5), return_exceptions=True) - # Depending on how many messages are present and how long the test - # runs, one receiver may not throw and error - in which case it should - # still not have received any messages. - print(outputs) - assert isinstance(outputs[0], EventHubError) or outputs[0] == 0 + assert isinstance(outputs[0], EventHubError) assert isinstance(outputs[1], int) and outputs[1] >= 1 except: raise @@ -265,16 +257,14 @@ async def test_epoch_receiver_after_non_epoch_receiver_async(connection_str, sen async def test_non_epoch_receiver_after_epoch_receiver_async(connection_str, senders): client = EventHubClientAsync.from_connection_string(connection_str, debug=False) receivers = [] - receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=1000)) - receivers.append(client.add_async_receiver("$default", "0", prefetch=1000)) - - await client.run_async() + receivers.append(client.add_async_epoch_receiver("$default", "0", 15, prefetch=10)) + receivers.append(client.add_async_receiver("$default", "0", prefetch=10)) try: + await client.run_async() outputs = await asyncio.gather( pump(receivers[0]), pump(receivers[1]), return_exceptions=True) - print(outputs) assert isinstance(outputs[1], EventHubError) assert isinstance(outputs[0], int) and outputs[0] >= 1 except: From 70a07a322c2854ae2949fa5af382a6b20a58e484 Mon Sep 17 00:00:00 2001 From: annatisch Date: Thu, 5 Jul 2018 12:05:18 -0700 Subject: [PATCH 09/11] Updates for RC1 release --- HISTORY.rst | 6 +- azure/eventhub/__init__.py | 2 +- azure/eventhub/_async/__init__.py | 8 +-- 
azure/eventhub/_async/receiver_async.py | 1 - azure/eventhub/client.py | 82 +++++++++++++++++++------ azure/eventprocessorhost/eph.py | 3 +- pylintrc | 2 +- setup.py | 2 +- tests/__init__.py | 2 +- tests/test_longrunning_receive.py | 7 +-- 10 files changed, 80 insertions(+), 35 deletions(-) diff --git a/HISTORY.rst b/HISTORY.rst index 3d567d5..9e63944 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -8,11 +8,11 @@ Release History - **Breaking change** Restructured library to support Python 3.7. Submodule `async` has been renamed and all classes from this module can now be imported from azure.eventhub directly. -- Updated uAMQP dependency to vRC2 -- Added support for constructing IoTHub connections. -- Removed optional `callback` argument from `Receiver.receive` and `AsyncReceiver.receive`. +- **Breaking change** Removed optional `callback` argument from `Receiver.receive` and `AsyncReceiver.receive`. This removes the potential for messages to be processed via callback for not yet returned in the batch. +- Updated uAMQP dependency to v0.1.0 +- Added support for constructing IoTHub connections. - Fixed memory leak in receive operations. - Dropped Python 2.7 wheel support. diff --git a/azure/eventhub/__init__.py b/azure/eventhub/__init__.py index d730dcb..5182b38 100644 --- a/azure/eventhub/__init__.py +++ b/azure/eventhub/__init__.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -__version__ = "0.2.0b2" +__version__ = "0.2.0rc1" from azure.eventhub.common import EventData, EventHubError, Offset from azure.eventhub.client import EventHubClient diff --git a/azure/eventhub/_async/__init__.py b/azure/eventhub/_async/__init__.py index e80d78a..b17394c 100644 --- a/azure/eventhub/_async/__init__.py +++ b/azure/eventhub/_async/__init__.py @@ -86,7 +86,7 @@ async def _wait_for_client(self, client): try: while client.get_handler_state().value == 2: await self.connection.work_async() - except Exception as exp: + except Exception as exp: # pylint: disable=broad-except await client.close_async(exception=exp) async def _start_client_async(self, client): @@ -96,13 +96,13 @@ async def _start_client_async(self, client): while not started: await self.connection.work_async() started = await client.has_started() - except Exception as exp: + except Exception as exp: # pylint: disable=broad-except await client.close_async(exception=exp) async def _handle_redirect(self, redirects): if len(redirects) != len(self.clients): not_redirected = [c for c in self.clients if not c.redirected] - done, timeout = await asyncio.wait([self._wait_for_client(c) for c in not_redirected], timeout=5) + _, timeout = await asyncio.wait([self._wait_for_client(c) for c in not_redirected], timeout=5) if timeout: raise EventHubError("Some clients are attempting to redirect the connection.") redirects = [c.redirected for c in self.clients if c.redirected] @@ -132,7 +132,7 @@ async def run_async(self): redirects = [c.redirected for c in self.clients if c.redirected] failed = [c.error for c in self.clients if c.error] if failed and len(failed) == len(self.clients): - log.warning("{}: All clients failed to start.".format(self.container_id, len(failed))) + log.warning("{}: All clients failed to start.".format(self.container_id)) raise failed[0] elif failed: log.warning("{}: {} clients failed to 
start.".format(self.container_id, len(failed))) diff --git a/azure/eventhub/_async/receiver_async.py b/azure/eventhub/_async/receiver_async.py index 3506fdf..9438fae 100644 --- a/azure/eventhub/_async/receiver_async.py +++ b/azure/eventhub/_async/receiver_async.py @@ -40,7 +40,6 @@ def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pyli self.prefetch = prefetch self.properties = None self.epoch = epoch - properties = None if epoch: self.properties = {types.AMQPSymbol(self._epoch): types.AMQPLong(int(epoch))} self._handler = ReceiveClientAsync( diff --git a/azure/eventhub/client.py b/azure/eventhub/client.py index d0a6687..91bc482 100644 --- a/azure/eventhub/client.py +++ b/azure/eventhub/client.py @@ -23,6 +23,7 @@ from azure.eventhub import __version__ from azure.eventhub.sender import Sender from azure.eventhub.receiver import Receiver +from azure.eventhub.common import EventHubError log = logging.getLogger(__name__) @@ -36,6 +37,8 @@ def _parse_conn_str(conn_str): key, _, value = element.partition('=') if key.lower() == 'endpoint': endpoint = value.rstrip('/') + elif key.lower() == 'hostname': + endpoint = value.rstrip('/') elif key.lower() == 'sharedaccesskeyname': shared_access_key_name = value elif key.lower() == 'sharedaccesskey': @@ -113,6 +116,7 @@ def __init__(self, address, username=None, password=None, debug=False): raise ValueError("Missing username and/or password.") auth_uri = "sb://{}{}".format(self.address.hostname, self.address.path) self.auth = self._create_auth(auth_uri, username, password) + self._auth_config = None self.connection = None self.debug = debug @@ -141,7 +145,9 @@ def from_iothub_connection_string(cls, conn_str, **kwargs): hub_name = address.split('.')[0] username = "{}@sas.root.{}".format(policy, hub_name) password = _generate_sas_token(address, policy, key) - return cls(address, username=username, password=password, **kwargs) + client = cls("amqps://" + address, username=username, password=password, 
**kwargs) + client._auth_config = {'username': policy, 'password': key} # pylint: disable=protected-access + return client def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self-use """ @@ -155,6 +161,8 @@ def _create_auth(self, auth_uri, username, password): # pylint: disable=no-self :param password: The shared access key. :type password: str """ + if "@sas.root" in username: + return authentication.SASLPlain(self.address.hostname, username, password) return authentication.SASTokenAuth.from_shared_access_key(auth_uri, username, password) def _create_properties(self): # pylint: disable=no-self-use @@ -201,18 +209,52 @@ def _close_clients(self): for client in self.clients: client.close() + def _start_clients(self): + for client in self.clients: + try: + client.open(self.connection) + while not client.has_started(): + self.connection.work() + except Exception as exp: # pylint: disable=broad-except + client.close(exception=exp) + + def _handle_redirect(self, redirects): + if len(redirects) != len(self.clients): + raise EventHubError("Some clients are attempting to redirect the connection.") + if not all(r.hostname == redirects[0].hostname for r in redirects): + raise EventHubError("Multiple clients attempting to redirect to different hosts.") + self.auth = self._create_auth(redirects[0].address.decode('utf-8'), **self._auth_config) + self.connection.redirect(redirects[0], self.auth) + for client in self.clients: + client.open(self.connection) + def run(self): """ Run the EventHubClient in blocking mode. Opens the connection and starts running all Sender/Receiver clients. 
- :rtype: ~azure.eventhub.EventHubClient + :rtype: ~azure.eventhub.client.EventHubClient """ log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) self._create_connection() - for client in self.clients: - client.open(connection=self.connection) - return self + try: + self._start_clients() + redirects = [c.redirected for c in self.clients if c.redirected] + failed = [c.error for c in self.clients if c.error] + if failed and len(failed) == len(self.clients): + log.warning("{}: All clients failed to start.".format(self.container_id)) + raise failed[0] + elif failed: + log.warning("{}: {} clients failed to start.".format(self.container_id, len(failed))) + elif redirects: + self._handle_redirect(redirects) + except EventHubError: + self.stop() + raise + except Exception as e: + self.stop() + raise EventHubError(str(e)) + return failed def stop(self): """ @@ -262,7 +304,7 @@ def get_eventhub_info(self): finally: mgmt_client.close() - def add_receiver(self, consumer_group, partition, offset=None, prefetch=300): + def add_receiver(self, consumer_group, partition, offset=None, prefetch=300, operation=None): """ Add a receiver to the client for a particular consumer group and partition. @@ -271,21 +313,22 @@ def add_receiver(self, consumer_group, partition, offset=None, prefetch=300): :param partition: The ID of the partition. :type partition: str :param offset: The offset from which to start receiving. - :type offset: ~azure.eventhub.Offset + :type offset: ~azure.eventhub.common.Offset :param prefetch: The message prefetch count of the receiver. Default is 300. 
:type prefetch: int - :rtype: ~azure.eventhub.Receiver + :rtype: ~azure.eventhub.receiver.Receiver """ + path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, self.address.path, consumer_group, partition) + self.address.hostname, path, consumer_group, partition) source = Source(source_url) if offset is not None: source.set_filter(offset.selector()) handler = Receiver(self, source, prefetch=prefetch) - self.clients.append(handler._handler) # pylint: disable=protected-access + self.clients.append(handler) return handler - def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300): + def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300, operation=None): """ Add a receiver to the client with an epoch value. Only a single epoch receiver can connect to a partition at any given time - additional epoch receivers must have @@ -300,26 +343,29 @@ def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300): :type epoch: int :param prefetch: The message prefetch count of the receiver. Default is 300. 
:type prefetch: int - :rtype: ~azure.eventhub.Receiver + :rtype: ~azure.eventhub.receiver.Receiver """ + path = self.address.path + operation if operation else self.address.path source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format( - self.address.hostname, self.address.path, consumer_group, partition) + self.address.hostname, path, consumer_group, partition) handler = Receiver(self, source_url, prefetch=prefetch, epoch=epoch) - self.clients.append(handler._handler) # pylint: disable=protected-access + self.clients.append(handler) return handler - def add_sender(self, partition=None): + def add_sender(self, partition=None, operation=None): """ - Add a sender to the client to send ~azure.eventhub.EventData object + Add a sender to the client to send ~azure.eventhub.common.EventData object to an EventHub. :param partition: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via round-robin. :type parition: str - :rtype: ~azure.eventhub.Sender + :rtype: ~azure.eventhub.sender.Sender """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) + if operation: + target = target + operation handler = Sender(self, target, partition=partition) - self.clients.append(handler._handler) # pylint: disable=protected-access + self.clients.append(handler) return handler diff --git a/azure/eventprocessorhost/eph.py b/azure/eventprocessorhost/eph.py index c344628..27cbc3e 100644 --- a/azure/eventprocessorhost/eph.py +++ b/azure/eventprocessorhost/eph.py @@ -25,7 +25,8 @@ def __init__(self, event_processor, eh_config, storage_manager, ep_params=None, :type eh_config: ~azure.eventprocessorhost.eh_config.EventHubConfig :param storage_manager: The Azure storage manager for persisting lease and checkpoint information. 
- :type storage_manager: ~azure.eventprocessorhost.azure_storage_checkpoint_manager.AzureStorageCheckpointLeaseManager + :type storage_manager: + ~azure.eventprocessorhost.azure_storage_checkpoint_manager.AzureStorageCheckpointLeaseManager :param ep_params: Optional arbitrary parameters to be passed into the event_processor on initialization. :type ep_params: list diff --git a/pylintrc b/pylintrc index 14cb341..7b3f956 100644 --- a/pylintrc +++ b/pylintrc @@ -6,7 +6,7 @@ reports=no # For all codes, run 'pylint --list-msgs' or go to 'https://pylint.readthedocs.io/en/latest/reference_guide/features.html' # locally-disabled: Warning locally suppressed using disable-msg # cyclic-import: because of https://github.com/PyCQA/pylint/issues/850 -disable=missing-docstring,locally-disabled,fixme,cyclic-import,too-many-arguments,invalid-name,duplicate-code,logging-format-interpolation,too-many-instance-attributes,too-few-public-methods +disable=raising-bad-type,missing-docstring,locally-disabled,fixme,cyclic-import,too-many-arguments,invalid-name,duplicate-code,logging-format-interpolation,too-many-instance-attributes,too-few-public-methods [FORMAT] max-line-length=120 diff --git a/setup.py b/setup.py index b6275ea..9fce5a2 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,7 @@ zip_safe=False, packages=find_packages(exclude=["examples", "tests"]), install_requires=[ - 'uamqp==0.1.0rc2', + 'uamqp~=0.1.0', 'msrestazure~=0.4.11', 'azure-common~=1.1', 'azure-storage~=0.36.0' diff --git a/tests/__init__.py b/tests/__init__.py index 7ec7d3b..7b7c91a 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -12,7 +12,7 @@ def get_logger(filename, level=logging.INFO): azure_logger = logging.getLogger("azure") azure_logger.setLevel(level) uamqp_logger = logging.getLogger("uamqp") - uamqp_logger.setLevel(logging.DEBUG) + uamqp_logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') console_handler = 
logging.StreamHandler(stream=sys.stdout) diff --git a/tests/test_longrunning_receive.py b/tests/test_longrunning_receive.py index b5519e3..60007a5 100644 --- a/tests/test_longrunning_receive.py +++ b/tests/test_longrunning_receive.py @@ -17,7 +17,7 @@ from urllib.parse import quote_plus from azure.eventhub import Offset -from azure.eventhub.async import EventHubClientAsync +from azure.eventhub import EventHubClientAsync try: import tests @@ -54,6 +54,7 @@ async def pump(_pid, receiver, _args, _dl): total)) except Exception as e: print("Partition {} receiver failed: {}".format(_pid, e)) + raise def test_long_running_receive(): @@ -73,7 +74,7 @@ def test_long_running_receive(): if args.conn_str: client = EventHubClientAsync.from_connection_string( args.conn_str, - eventhub=args.eventhub) + eventhub=args.eventhub, debug=False) elif args.address: client = EventHubClientAsync( args.address, @@ -97,8 +98,6 @@ def test_long_running_receive(): pumps.append(pump(pid, receiver, args, args.duration)) loop.run_until_complete(client.run_async()) loop.run_until_complete(asyncio.gather(*pumps)) - except: - raise finally: loop.run_until_complete(client.stop_async()) From 4d7b28d890dfb6b73594577d905d7a99a0186f4b Mon Sep 17 00:00:00 2001 From: annatisch Date: Thu, 5 Jul 2018 15:38:46 -0700 Subject: [PATCH 10/11] Fix long running test --- tests/test_negative.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_negative.py b/tests/test_negative.py index 3c8dde6..753f2e2 100644 --- a/tests/test_negative.py +++ b/tests/test_negative.py @@ -273,8 +273,10 @@ async def test_send_null_body_async(connection_str): async def pump(receiver): messages = 0 + count = 0 batch = await receiver.receive(timeout=10) - while batch: + while batch and count <= 5: + count += 1 messages += len(batch) batch = await receiver.receive(timeout=10) return messages From e174bd758dcf96e326a954ed4a9cafceafaaf100 Mon Sep 17 00:00:00 2001 From: annatisch Date: Fri, 6 Jul 2018 09:11:36 
-0700 Subject: [PATCH 11/11] Docstring and sample cleanups --- HISTORY.rst | 3 +- azure/eventhub/_async/__init__.py | 13 +++++++-- azure/eventhub/_async/receiver_async.py | 26 ++++++++++++++++- azure/eventhub/_async/sender_async.py | 39 +++++++++++++++++++++++-- azure/eventhub/client.py | 16 +++++++++- azure/eventhub/common.py | 27 ++++++++--------- azure/eventhub/receiver.py | 34 +++++++++++++++++++-- azure/eventhub/sender.py | 34 +++++++++++++++++++-- examples/batch_send.py | 3 +- examples/batch_transfer.py | 3 +- examples/eph.py | 2 +- examples/recv.py | 3 +- examples/recv_async.py | 10 +++---- examples/recv_batch.py | 6 +--- examples/send.py | 1 + examples/send_async.py | 5 ++-- examples/transfer.py | 14 +++++++-- 17 files changed, 196 insertions(+), 43 deletions(-) diff --git a/HISTORY.rst b/HISTORY.rst index 9e63944..c90d34f 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -3,12 +3,13 @@ Release History =============== -0.2.0rc1 (unreleased) +0.2.0rc1 (2018-07-06) +++++++++++++++++++++ - **Breaking change** Restructured library to support Python 3.7. Submodule `async` has been renamed and all classes from this module can now be imported from azure.eventhub directly. - **Breaking change** Removed optional `callback` argument from `Receiver.receive` and `AsyncReceiver.receive`. +- **Breaking change** `EventData.properties` has been renamed to `EventData.application_properties`. This removes the potential for messages to be processed via callback for not yet returned in the batch. - Updated uAMQP dependency to v0.1.0 diff --git a/azure/eventhub/_async/__init__.py b/azure/eventhub/_async/__init__.py index b17394c..e2e727a 100644 --- a/azure/eventhub/_async/__init__.py +++ b/azure/eventhub/_async/__init__.py @@ -117,8 +117,8 @@ async def run_async(self): Run the EventHubClient asynchronously. Opens the connection and starts running all AsyncSender/AsyncReceiver clients. Returns a list of the start up results. 
For a succcesful client start the - result will be `None`, otherwise the exception raise. - If all clients failed to start, the run will fail, shut down the connection + result will be `None`, otherwise the exception raised. + If all clients failed to start, then run will fail, shut down the connection and raise an exception. If at least one client starts up successfully the run command will succeed. @@ -193,6 +193,9 @@ def add_async_receiver(self, consumer_group, partition, offset=None, prefetch=30 :type offset: ~azure.eventhub.common.Offset :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str :rtype: ~azure.eventhub._async.receiver_async.ReceiverAsync """ path = self.address.path + operation if operation else self.address.path @@ -220,6 +223,9 @@ def add_async_epoch_receiver(self, consumer_group, partition, epoch, prefetch=30 :type epoch: int :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str :rtype: ~azure.eventhub._async.receiver_async.ReceiverAsync """ path = self.address.path + operation if operation else self.address.path @@ -238,6 +244,9 @@ def add_async_sender(self, partition=None, operation=None, loop=None): If omitted, the events will be distributed to available partitions via round-robin. :type partition: str + :operation: An optional operation to be appended to the hostname in the target URL. + The value must start with `/` character. 
+ :type operation: str :rtype: ~azure.eventhub._async.sender_async.SenderAsync """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) diff --git a/azure/eventhub/_async/receiver_async.py b/azure/eventhub/_async/receiver_async.py index 9438fae..2ceb518 100644 --- a/azure/eventhub/_async/receiver_async.py +++ b/azure/eventhub/_async/receiver_async.py @@ -52,6 +52,14 @@ def __init__(self, client, source, prefetch=300, epoch=None, loop=None): # pyli loop=self.loop) async def open_async(self, connection): + """ + Open the Receiver using the supplied conneciton. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection: ~uamqp._async.connection_async.ConnectionAsync + """ if self.redirected: self._handler = ReceiveClientAsync( self.redirected.address, @@ -64,6 +72,13 @@ async def open_async(self, connection): await self._handler.open_async(connection=connection) async def has_started(self): + """ + Whether the handler has completed all start up processes such as + establishing the connection, session, link and authentication, and + is not ready to process messages. + + :rtype: bool + """ # pylint: disable=protected-access timeout = False auth_in_progress = False @@ -79,6 +94,15 @@ async def has_started(self): return True async def close_async(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. 
+ :type exception: Exception + """ if self.error: return elif isinstance(exception, errors.LinkRedirect): @@ -88,7 +112,7 @@ async def close_async(self, exception=None): elif exception: self.error = EventHubError(str(exception)) else: - self.error = EventHubError("This receive client is now closed.") + self.error = EventHubError("This receive handler is now closed.") await self._handler.close_async() async def receive(self, max_batch_size=None, timeout=None): diff --git a/azure/eventhub/_async/sender_async.py b/azure/eventhub/_async/sender_async.py index 27bce63..3e57e3c 100644 --- a/azure/eventhub/_async/sender_async.py +++ b/azure/eventhub/_async/sender_async.py @@ -18,7 +18,7 @@ class AsyncSender(Sender): def __init__(self, client, target, partition=None, loop=None): # pylint: disable=super-init-not-called """ - Instantiate an EventHub event SenderAsync client. + Instantiate an EventHub event SenderAsync handler. :param client: The parent EventHubClientAsync. :type client: ~azure.eventhub._async.EventHubClientAsync @@ -43,6 +43,14 @@ def __init__(self, client, target, partition=None, loop=None): # pylint: disabl self._condition = None async def open_async(self, connection): + """ + Open the Sender using the supplied conneciton. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection:~uamqp._async.connection_async.ConnectionAsync + """ if self.redirected: self._handler = SendClientAsync( self.redirected.address, @@ -52,6 +60,13 @@ async def open_async(self, connection): await self._handler.open_async(connection=connection) async def has_started(self): + """ + Whether the handler has completed all start up processes such as + establishing the connection, session, link and authentication, and + is not ready to process messages. 
+ + :rtype: bool + """ # pylint: disable=protected-access timeout = False auth_in_progress = False @@ -67,6 +82,15 @@ async def has_started(self): return True async def close_async(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + """ if self.error: return elif isinstance(exception, errors.LinkRedirect): @@ -76,7 +100,7 @@ async def close_async(self, exception=None): elif exception: self.error = EventHubError(str(exception)) else: - self.error = EventHubError("This send client is now closed.") + self.error = EventHubError("This send handler is now closed.") await self._handler.close_async() async def send(self, event_data): @@ -108,3 +132,14 @@ async def send(self, event_data): raise error else: return self._outcome + + async def wait_async(self): + """ + Wait until all transferred events have been sent. + """ + if self.error: + raise self.error + try: + await self._handler.wait_async() + except Exception as e: + raise EventHubError("Send failed: {}".format(e)) diff --git a/azure/eventhub/client.py b/azure/eventhub/client.py index 91bc482..6e37dfe 100644 --- a/azure/eventhub/client.py +++ b/azure/eventhub/client.py @@ -232,8 +232,13 @@ def run(self): """ Run the EventHubClient in blocking mode. Opens the connection and starts running all Sender/Receiver clients. + Returns a list of the start up results. For a succcesful client start the + result will be `None`, otherwise the exception raised. + If all clients failed to start, then run will fail, shut down the connection + and raise an exception. + If at least one client starts up successfully the run command will succeed. 
- :rtype: ~azure.eventhub.client.EventHubClient + :rtype: list[~azure.eventhub.common.EventHubError] """ log.info("{}: Starting {} clients".format(self.container_id, len(self.clients))) self._create_connection() @@ -316,6 +321,9 @@ def add_receiver(self, consumer_group, partition, offset=None, prefetch=300, ope :type offset: ~azure.eventhub.common.Offset :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str :rtype: ~azure.eventhub.receiver.Receiver """ path = self.address.path + operation if operation else self.address.path @@ -343,6 +351,9 @@ def add_epoch_receiver(self, consumer_group, partition, epoch, prefetch=300, ope :type epoch: int :param prefetch: The message prefetch count of the receiver. Default is 300. :type prefetch: int + :operation: An optional operation to be appended to the hostname in the source URL. + The value must start with `/` character. + :type operation: str :rtype: ~azure.eventhub.receiver.Receiver """ path = self.address.path + operation if operation else self.address.path @@ -361,6 +372,9 @@ def add_sender(self, partition=None, operation=None): If omitted, the events will be distributed to available partitions via round-robin. :type parition: str + :operation: An optional operation to be appended to the hostname in the target URL. + The value must start with `/` character. 
+ :type operation: str :rtype: ~azure.eventhub.sender.Sender """ target = "amqps://{}{}".format(self.address.hostname, self.address.path) diff --git a/azure/eventhub/common.py b/azure/eventhub/common.py index f528bba..4ba972a 100644 --- a/azure/eventhub/common.py +++ b/azure/eventhub/common.py @@ -36,25 +36,26 @@ def __init__(self, body=None, batch=None, to_device=None, message=None): """ self._partition_key = types.AMQPSymbol(EventData.PROP_PARTITION_KEY) self._annotations = {} - self._properties = {} - self._msg_properties = MessageProperties() + self._app_properties = {} + self.msg_properties = MessageProperties() if to_device: - self._msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) + self.msg_properties.to = '/devices/{}/messages/devicebound'.format(to_device) if batch: - self.message = BatchMessage(data=batch, multi_messages=True, properties=self._msg_properties) + self.message = BatchMessage(data=batch, multi_messages=True, properties=self.msg_properties) elif message: self.message = message + self.msg_properties = message.properties self._annotations = message.annotations - self._properties = message.application_properties + self._app_properties = message.application_properties else: if isinstance(body, list) and body: - self.message = Message(body[0], properties=self._msg_properties) + self.message = Message(body[0], properties=self.msg_properties) for more in body[1:]: self.message._body.append(more) # pylint: disable=protected-access elif body is None: raise ValueError("EventData cannot be None.") else: - self.message = Message(body, properties=self._msg_properties) + self.message = Message(body, properties=self.msg_properties) @property @@ -129,24 +130,24 @@ def partition_key(self, value): self._annotations = annotations @property - def properties(self): + def application_properties(self): """ Application defined properties on the message. 
:rtype: dict """ - return self._properties + return self._app_properties - @properties.setter - def properties(self, value): + @application_properties.setter + def application_properties(self, value): """ Application defined properties on the message. :param value: The application properties for the EventData. :type value: dict """ - self._properties = value - properties = dict(self._properties) + self._app_properties = value + properties = dict(self._app_properties) self.message.application_properties = properties @property diff --git a/azure/eventhub/receiver.py b/azure/eventhub/receiver.py index beec103..3cef829 100644 --- a/azure/eventhub/receiver.py +++ b/azure/eventhub/receiver.py @@ -48,6 +48,14 @@ def __init__(self, client, source, prefetch=300, epoch=None): timeout=self.timeout) def open(self, connection): + """ + Open the Receiver using the supplied conneciton. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type: connection: ~uamqp.connection.Connection + """ if self.redirected: self._handler = ReceiveClient( self.redirected.address, @@ -59,10 +67,23 @@ def open(self, connection): self._handler.open(connection) def get_handler_state(self): + """ + Get the state of the underlying handler with regards to start + up processes. + + :rtype: ~uamqp.constants.MessageReceiverState + """ # pylint: disable=protected-access return self._handler._message_receiver.get_state() def has_started(self): + """ + Whether the handler has completed all start up processes such as + establishing the connection, session, link and authentication, and + is not ready to process messages. + + :rtype: bool + """ # pylint: disable=protected-access timeout = False auth_in_progress = False @@ -78,6 +99,15 @@ def has_started(self): return True def close(self, exception=None): + """ + Close down the handler. 
If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + """ if self.error: return elif isinstance(exception, errors.LinkRedirect): self.redirected = exception elif exception: self.error = EventHubError(str(exception)) else: - self.error = EventHubError("This receive client is now closed.") + self.error = EventHubError("This receive handler is now closed.") self._handler.close() @property def queue_size(self): """ - The current size of the unprocessed message queue. + The current size of the unprocessed Event queue. :rtype: int """ diff --git a/azure/eventhub/sender.py b/azure/eventhub/sender.py index 0a0e08c..358a336 100644 --- a/azure/eventhub/sender.py +++ b/azure/eventhub/sender.py @@ -17,7 +17,7 @@ class Sender: def __init__(self, client, target, partition=None): """ - Instantiate an EventHub event Sender client. + Instantiate an EventHub event Sender handler. :param client: The parent EventHubClient. :type client: ~azure.eventhub.client.EventHubClient. @@ -39,6 +39,14 @@ def __init__(self, client, target, partition=None): self._condition = None def open(self, connection): + """ + Open the Sender using the supplied connection. + If the handler has previously been redirected, the redirect + context will be used to create a new handler before opening it. + + :param connection: The underlying client shared connection. + :type connection: ~uamqp.connection.Connection + """ if self.redirected: self._handler = SendClient( self.redirected.address, @@ -48,10 +56,23 @@ def open(self, connection): self._handler.open(connection) def get_handler_state(self): + """ + Get the state of the underlying handler with regards to start + up processes. 
+ + :rtype: ~uamqp.constants.MessageSenderState + """ # pylint: disable=protected-access return self._handler._message_sender.get_state() def has_started(self): + """ + Whether the handler has completed all start up processes such as + establishing the connection, session, link and authentication, and + is now ready to process messages. + + :rtype: bool + """ # pylint: disable=protected-access timeout = False auth_in_progress = False @@ -67,6 +88,15 @@ def has_started(self): return True def close(self, exception=None): + """ + Close down the handler. If the handler has already closed, + this will be a no op. An optional exception can be passed in to + indicate that the handler was shutdown due to error. + + :param exception: An optional exception if the handler is closing + due to an error. + :type exception: Exception + """ if self.error: return elif isinstance(exception, errors.LinkRedirect): self.redirected = exception elif exception: self.error = EventHubError(str(exception)) else: - self.error = EventHubError("This send client is now closed.") + self.error = EventHubError("This send handler is now closed.") self._handler.close() def send(self, event_data): diff --git a/examples/batch_send.py b/examples/batch_send.py index 3f48669..7cbf625 100644 --- a/examples/batch_send.py +++ b/examples/batch_send.py @@ -28,7 +28,8 @@ def data_generator(): - for i in range(15000): + for i in range(1500): + logger.info("Yielding message {}".format(i)) yield b"Hello world" diff --git a/examples/batch_transfer.py b/examples/batch_transfer.py index 99efd9e..676ac6c 100644 --- a/examples/batch_transfer.py +++ b/examples/batch_transfer.py @@ -33,7 +33,8 @@ def callback(outcome, condition): def data_generator(): - for i in range(15000): + for i in range(1500): + logger.info("Yielding message {}".format(i)) yield b"Hello world" diff --git a/examples/eph.py b/examples/eph.py index 7a9633c..39f0fbb 100644 --- a/examples/eph.py +++ b/examples/eph.py @@ -79,7 
+79,7 @@ async def wait_and_close(host): """ Run EventProcessorHost for 2 minutes then shutdown. """ - await asyncio.sleep(120) + await asyncio.sleep(60) await host.close_async() diff --git a/examples/recv.py b/examples/recv.py index 25e2d34..92a5df2 100644 --- a/examples/recv.py +++ b/examples/recv.py @@ -38,9 +38,10 @@ receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000, offset=OFFSET) client.run() start_time = time.time() - for event_data in receiver.receive(timeout=10): + for event_data in receiver.receive(timeout=100): last_offset = event_data.offset last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset, last_sn)) total += 1 end_time = time.time() diff --git a/examples/recv_async.py b/examples/recv_async.py index d025bc9..ab8da39 100644 --- a/examples/recv_async.py +++ b/examples/recv_async.py @@ -29,17 +29,17 @@ KEY = os.environ.get('EVENT_HUB_SAS_KEY') CONSUMER_GROUP = "$default" OFFSET = Offset("-1") -PARTITION = "0" -async def pump(client): - receiver = client.add_async_receiver(CONSUMER_GROUP, PARTITION, OFFSET, prefetch=5) +async def pump(client, partition): + receiver = client.add_async_receiver(CONSUMER_GROUP, partition, OFFSET, prefetch=5) await client.run_async() total = 0 start_time = time.time() for event_data in await receiver.receive(timeout=10): last_offset = event_data.offset last_sn = event_data.sequence_number + print("Received: {}, {}".format(last_offset, last_sn)) total += 1 end_time = time.time() run_time = end_time - start_time @@ -52,8 +52,8 @@ async def pump(client): loop = asyncio.get_event_loop() client = EventHubClientAsync(ADDRESS, debug=False, username=USER, password=KEY) tasks = [ - asyncio.ensure_future(pump(client)), - asyncio.ensure_future(pump(client))] + asyncio.ensure_future(pump(client, "0")), + asyncio.ensure_future(pump(client, "1"))] loop.run_until_complete(asyncio.wait(tasks)) loop.run_until_complete(client.stop_async()) loop.close() diff --git a/examples/recv_batch.py 
b/examples/recv_batch.py index f6373e4..7ce562d 100644 --- a/examples/recv_batch.py +++ b/examples/recv_batch.py @@ -31,10 +31,6 @@ PARTITION = "0" -def on_event_data(event_data): - logger.debug("Got event no. {}".format(event_data.sequence_number)) - - total = 0 last_sn = -1 last_offset = "-1" @@ -42,7 +38,7 @@ def on_event_data(event_data): try: receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=100, offset=OFFSET) client.run() - batched_events = receiver.receive(max_batch_size=10, callback=on_event_data) + batched_events = receiver.receive(max_batch_size=10) for event_data in batched_events: last_offset = event_data.offset last_sn = event_data.sequence_number diff --git a/examples/send.py b/examples/send.py index 9ba01f6..fe1d91c 100644 --- a/examples/send.py +++ b/examples/send.py @@ -37,6 +37,7 @@ try: start_time = time.time() for i in range(100): + logger.info("Sending message: {}".format(i)) sender.send(EventData(str(i))) except: raise diff --git a/examples/send_async.py b/examples/send_async.py index 96a6ce1..248fdcf 100644 --- a/examples/send_async.py +++ b/examples/send_async.py @@ -35,9 +35,10 @@ async def run(client): async def send(snd, count): for i in range(count): - data = EventData(str(i) + logger.info("Sending message: {}".format(i)) + data = EventData(str(i)) data.partition_key = b'SamplePartitionKey' - await snd.send(data)) + await snd.send(data) try: if not ADDRESS: diff --git a/examples/transfer.py b/examples/transfer.py index 5a37dda..5190add 100644 --- a/examples/transfer.py +++ b/examples/transfer.py @@ -27,18 +27,26 @@ USER = os.environ.get('EVENT_HUB_SAS_POLICY') KEY = os.environ.get('EVENT_HUB_SAS_KEY') + +def callback(outcome, condition): + logger.info("Message sent. 
Outcome: {}, Condition: {}".format( + outcome, condition)) + + try: if not ADDRESS: raise ValueError("No EventHubs URL supplied.") client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY) - sender = client.add_sender() + sender = client.add_sender(partition="1") client.run() try: start_time = time.time() - for i in range(1000): - sender.transfer(EventData(str(i))) + for i in range(100): + sender.transfer(EventData(str(i)), callback=callback) + logger.info("Queued 100 messages.") sender.wait() + logger.info("Finished processing queue.") except: raise finally: