diff --git a/ChangeLog.txt b/ChangeLog.txt
index 986b301bebb4..38eac376ef8d 100644
--- a/ChangeLog.txt
+++ b/ChangeLog.txt
@@ -1,2 +1,6 @@
-2012-??-?? Version 0.9.0
- * Initial Release
\ No newline at end of file
+2012-10-16 Version 0.6.0
+ * Added service management API
+ * Added ability to specify custom hosts
+ * Added proxy server support (HTTP CONNECT tunneling)
+2012-06-06 Version 0.5.0
+ * Initial Release
diff --git a/README.md b/README.md
index cf8df8a0f985..02b8cbd0f6da 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,17 @@ Python Developer Center.
* Service Bus
* Queues: create, list and delete queues; create, list, and delete subscriptions; send, receive, unlock and delete messages
* Topics: create, list, and delete topics; create, list, and delete rules
+* Service Management
+ * storage accounts: create, update, delete, list, regenerate keys
+ * affinity groups: create, update, delete, list, get properties
+ * locations: list
+ * hosted services: create, update, delete, list, get properties
+ * deployment: create, get, delete, swap, change configuration, update status, upgrade, rollback
+ * role instance: reboot, reimage
+ * discover addresses and ports for the endpoints of other role instances in your service
+ * get configuration settings and access local resources
+ * get role instance information for current role and other role instances
+ * query and set the status of the current role
# Getting Started
## Download Source Code
@@ -45,20 +56,21 @@ the local Storage Emulator (with the exception of Service Bus features).
# Usage
## Table Storage
-To ensure a table exists, call **create_table**:
+To ensure a table exists, call **create\_table**:
```Python
from azure.storage import TableService
ts = TableService(account_name, account_key)
-table = ts.create_table('tasktable')
+ts.create_table('tasktable')
```
-A new entity can be added by calling **insert_entity**:
+A new entity can be added by calling **insert\_entity**:
```Python
+from datetime import datetime
ts = TableService(account_name, account_key)
-table = ts.create_table('tasktable')
-table.insert_entity(
+ts.create_table('tasktable')
+ts.insert_entity(
'tasktable',
{
'PartitionKey' : 'tasksSeattle',
@@ -69,7 +81,7 @@ table.insert_entity(
)
```
-The method **get_entity** can then be used to fetch the entity that was just inserted:
+The method **get\_entity** can then be used to fetch the entity that was just inserted:
```Python
ts = TableService(account_name, account_key)
@@ -78,27 +90,25 @@ entity = ts.get_entity('tasktable', 'tasksSeattle', '1')
## Blob Storage
-The **create_container** method can be used to create a
+The **create\_container** method can be used to create a
container in which to store a blob:
```Python
from azure.storage import BlobService
-blob_service = BlobService()
-container = blob_service.create_container('taskcontainer')
+blob_service = BlobService(account_name, account_key)
+blob_service.create_container('taskcontainer')
```
-To upload a file (assuming it is called task1-upload.txt, it contains the exact text "hello world" (no quotation marks), and it is placed in the same folder as the script below), the method **put_blob** can be used:
+To upload a file (assuming it is called task1-upload.txt, it contains the exact text "hello world" (no quotation marks), and it is placed in the same folder as the script below), the method **put\_blob** can be used:
```Python
from azure.storage import BlobService
blob_service = BlobService(account_name, account_key)
-blob_service.put_blob('taskcontainer', 'task1',
-blobService = azure.createBlobService()
-blobService.put_blob('taskcontainer', 'task1', file('task1-upload.txt').read())
+blob_service.put_blob('taskcontainer', 'task1', file('task1-upload.txt').read(), 'BlockBlob')
```
-To download the blob and write it to the file system, the **get_blob** method can be used:
+To download the blob and write it to the file system, the **get\_blob** method can be used:
```Python
from azure.storage import BlobService
@@ -108,15 +118,15 @@ blob = blob_service.get_blob('taskcontainer', 'task1')
## Storage Queues
-The **create_queue** method can be used to ensure a queue exists:
+The **create\_queue** method can be used to ensure a queue exists:
```Python
from azure.storage import QueueService
queue_service = QueueService(account_name, account_key)
-queue = queue_service.create_queue('taskqueue')
+queue_service.create_queue('taskqueue')
```
-The **put_message** method can then be called to insert the message into the queue:
+The **put\_message** method can then be called to insert the message into the queue:
```Python
from azure.storage import QueueService
@@ -124,72 +134,148 @@ queue_service = QueueService(account_name, account_key)
queue_service.put_message('taskqueue', 'Hello world!')
```
-It is then possible to call the **get___messages** method, process the message and then call **delete_message** on the messages ID. This two-step process ensures messages don't get lost when they are removed from the queue.
+It is then possible to call the **get\_messages** method, process the message and then call **delete\_message** with the message id and receipt. This two-step process ensures messages don't get lost when they are removed from the queue.
```Python
from azure.storage import QueueService
queue_service = QueueService(account_name, account_key)
messages = queue_service.get_messages('taskqueue')
-queue_service.delete_message('taskqueue', messages[0].message_id)
+queue_service.delete_message('taskqueue', messages[0].message_id, messages[0].pop_receipt)
```
## ServiceBus Queues
ServiceBus Queues are an alternative to Storage Queues that might be useful in scenarios where more advanced messaging features are needed (larger message sizes, message ordering, single-operaiton destructive reads, scheduled delivery) using push-style delivery (using long polling).
-The **create_queue** method can be used to ensure a queue exists:
+The **create\_queue** method can be used to ensure a queue exists:
```Python
from azure.servicebus import ServiceBusService
-sbs = ServiceBusService(service_namespace, account_key)
-queue = sbs.create_queue('taskqueue');
+sbs = ServiceBusService(service_namespace, account_key, 'owner')
+sbs.create_queue('taskqueue')
```
-The **send__queue__message** method can then be called to insert the message into the queue:
+The **send\_queue\_message** method can then be called to insert the message into the queue:
```Python
-from azure.servicebus import ServiceBusService
-sbs = ServiceBusService(service_namespace, account_key)
-sbs.send_queue_message('taskqueue', 'Hello World!')
+from azure.servicebus import ServiceBusService, Message
+sbs = ServiceBusService(service_namespace, account_key, 'owner')
+msg = Message('Hello World!')
+sbs.send_queue_message('taskqueue', msg)
```
-It is then possible to call the **read__delete___queue__message** method to dequeue the message.
+It is then possible to call the **receive\_queue\_message** method to dequeue the message.
```Python
from azure.servicebus import ServiceBusService
-sbs = ServiceBusService(service_namespace, account_key)
-msg = sbs.read_delete_queue_message('taskqueue')
+sbs = ServiceBusService(service_namespace, account_key, 'owner')
+msg = sbs.receive_queue_message('taskqueue')
```
## ServiceBus Topics
ServiceBus topics are an abstraction on top of ServiceBus Queues that make pub/sub scenarios easy to implement.
-The **create_topic** method can be used to create a server-side topic:
+The **create\_topic** method can be used to create a server-side topic:
```Python
from azure.servicebus import ServiceBusService
-sbs = ServiceBusService(service_namespace, account_key)
-topic = sbs.create_topic('taskdiscussion')
+sbs = ServiceBusService(service_namespace, account_key, 'owner')
+sbs.create_topic('taskdiscussion')
```
-The **send__topic__message** method can be used to send a message to a topic:
+The **send\_topic\_message** method can be used to send a message to a topic:
```Python
-from azure.servicebus import ServiceBusService
-sbs = ServiceBusService(service_namespace, account_key)
-sbs.send_topic_message('taskdiscussion', 'Hello world!')
+from azure.servicebus import ServiceBusService, Message
+sbs = ServiceBusService(service_namespace, account_key, 'owner')
+msg = Message('Hello World!')
+sbs.send_topic_message('taskdiscussion', msg)
```
-A client can then create a subscription and start consuming messages by calling the **create__subscription** method followed by the **receive__subscription__message** method. Please note that any messages sent before the subscription is created will not be received.
+A client can then create a subscription and start consuming messages by calling the **create\_subscription** method followed by the **receive\_subscription\_message** method. Please note that any messages sent before the subscription is created will not be received.
```Python
-from azure.servicebus import ServiceBusService
-sbs = ServiceBusService(service_namespace, account_key)
+from azure.servicebus import ServiceBusService, Message
+sbs = ServiceBusService(service_namespace, account_key, 'owner')
sbs.create_subscription('taskdiscussion', 'client1')
+msg = Message('Hello World!')
+sbs.send_topic_message('taskdiscussion', msg)
msg = sbs.receive_subscription_message('taskdiscussion', 'client1')
```
+
+## Service Management
+
+### Set-up certificates
+
+You need to create two certificates, one for the server (a .cer file) and one for the client (a .pem file). To create the .pem file using [OpenSSL](http://www.openssl.org), execute this:
+
+ openssl req -x509 -nodes -days 365 -newkey rsa:1024 -keyout mycert.pem -out mycert.pem
+
+To create the .cer certificate, execute this:
+
+ openssl x509 -inform pem -in mycert.pem -outform der -out mycert.cer
+
+### List Available Locations
+
+```Python
+locations = sms.list_locations()
+for location in locations:
+ print(location.name)
+```
+
+### Create a Storage Service
+
+To create a storage service, you need a name for the service (between 3 and 24 lowercase characters and unique within Windows Azure), a label (up to 100 characters, automatically encoded to base-64), and either a location or an affinity group.
+
+```Python
+name = "mystorageservice"
+desc = name
+label = name
+location = 'West US'
+
+result = sms.create_storage_account(name, desc, label, location=location)
+```
+
+
+### Create a Cloud Service
+
+A cloud service is also known as a hosted service (from earlier versions of Windows Azure). The **create_hosted_service** method allows you to create a new hosted service by providing a hosted service name (which must be unique in Windows Azure), a label (automatically encoded to base-64), and the location *or* the affinity group for your service.
+
+```Python
+name = "myhostedservice"
+desc = name
+label = name
+location = 'West US'
+
+result = sms.create_hosted_service(name, label, desc, location=location)
+```
+
+### Create a Deployment
+
+To make a new deployment to Azure you must store the package file in a Windows Azure Blob Storage account under the same subscription as the hosted service to which the package is being uploaded. You can create a deployment package with the [Windows Azure PowerShell cmdlets](https://www.windowsazure.com/en-us/develop/php/how-to-guides/powershell-cmdlets/), or with the [cspack commandline tool](http://msdn.microsoft.com/en-us/library/windowsazure/gg432988.aspx).
+
+```Python
+service_name = "myhostedservice"
+deployment_name = "v1"
+slot = 'Production'
+package_url = "URL_for_.cspkg_file"
+configuration = base64.b64encode(open('path_to_.cscfg_file', 'rb').read())
+label = service_name
+
+result = sms.create_deployment(service_name,
+ slot,
+ deployment_name,
+ package_url,
+ label,
+ configuration)
+
+operation = sms.get_operation_status(result.request_id)
+print('Operation status: ' + operation.status)
+```
+
+
** For more examples please see the [Windows Azure Python Developer Center](http://www.windowsazure.com/en-us/develop/python) **
# Need Help?
diff --git a/src/azure.pyproj b/src/azure.pyproj
index c5a84ad4f3e5..c76335724cb3 100644
--- a/src/azure.pyproj
+++ b/src/azure.pyproj
@@ -20,6 +20,10 @@
2af0f10d-7135-4994-9156-5d01c9c11b7e
2.7
+ SAK
+ SAK
+ SAK
+ SAK
true
@@ -34,6 +38,8 @@
+
+
@@ -44,10 +50,12 @@
+
+
-
+
diff --git a/src/azure/__init__.py b/src/azure/__init__.py
index eccc0d681e02..7ede4ce9b1c3 100644
--- a/src/azure/__init__.py
+++ b/src/azure/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
+import sys
import types
from datetime import datetime
from xml.dom import minidom
@@ -28,6 +29,7 @@
QUEUE_SERVICE_HOST_BASE = '.queue.core.windows.net'
TABLE_SERVICE_HOST_BASE = '.table.core.windows.net'
SERVICE_BUS_HOST_BASE = '.servicebus.windows.net'
+MANAGEMENT_HOST = 'management.core.windows.net'
#Development ServiceClient URLs
DEV_BLOB_HOST = '127.0.0.1:10000'
@@ -43,12 +45,12 @@
_ERROR_CANNOT_FIND_ROW_KEY = 'Cannot find row key in request.'
_ERROR_INCORRECT_TABLE_IN_BATCH = 'Table should be the same in a batch operations'
_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH = 'Partition Key should be the same in a batch operations'
-_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = 'Partition Key should be the same in a batch operations'
+_ERROR_DUPLICATE_ROW_KEY_IN_BATCH = 'Row Keys should not be the same in a batch operations'
_ERROR_BATCH_COMMIT_FAIL = 'Batch Commit Fail'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_DELETE = 'Message is not peek locked and cannot be deleted.'
_ERROR_MESSAGE_NOT_PEEK_LOCKED_ON_UNLOCK = 'Message is not peek locked and cannot be unlocked.'
-_ERROR_QUEUE_NOT_FOUND = 'Queue is not Found'
-_ERROR_TOPIC_NOT_FOUND = 'Topic is not Found'
+_ERROR_QUEUE_NOT_FOUND = 'Queue was not found'
+_ERROR_TOPIC_NOT_FOUND = 'Topic was not found'
_ERROR_CONFLICT = 'Conflict'
_ERROR_NOT_FOUND = 'Not found'
_ERROR_UNKNOWN = 'Unknown error (%s)'
@@ -58,6 +60,10 @@
_ERROR_VALUE_SHOULD_NOT_BE_NULL = '%s should not be None.'
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY = 'Cannot serialize the specified value (%s) to an entity. Please use an EntityProperty (which can specify custom types), int, str, bool, or datetime'
+_USER_AGENT_STRING = 'pyazure'
+
+METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
+
class WindowsAzureData(object):
''' This is the base of data class. It is only used to check whether it is instance or not. '''
pass
@@ -80,8 +86,14 @@ def __init__(self, message):
self.message = message
class Feed:
- def __init__(self, type):
- self.type = type
+ pass
+
+class _Base64String(str):
+ pass
+
+class HeaderDict(dict):
+ def __getitem__(self, index):
+ return super(HeaderDict, self).__getitem__(index.lower())
def _get_readable_id(id_name):
"""simplified an id to be more friendly for us people"""
@@ -97,6 +109,9 @@ def _get_entry_properties(xmlstr, include_id):
properties = {}
for entry in _get_child_nodes(xmldoc, 'entry'):
+ etag = entry.getAttributeNS(METADATA_NS, 'etag')
+ if etag:
+ properties['etag'] = etag
for updated in _get_child_nodes(entry, 'updated'):
properties['updated'] = updated.firstChild.nodeValue
for name in _get_children_from_path(entry, 'author', 'name'):
@@ -109,6 +124,14 @@ def _get_entry_properties(xmlstr, include_id):
return properties
+def _get_first_child_node_value(parent_node, node_name):
+ xml_attrs = _get_child_nodes(parent_node, node_name)
+ if xml_attrs:
+ xml_attr = xml_attrs[0]
+ if xml_attr.firstChild:
+ value = xml_attr.firstChild.nodeValue
+ return value
+
def _get_child_nodes(node, tagName):
return [childNode for childNode in node.getElementsByTagName(tagName)
if childNode.parentNode == node]
@@ -142,7 +165,7 @@ def _create_entry(entry_body):
updated_str += '+00:00'
entry_start = '''
-
+
{updated}
{body}'''
@@ -156,6 +179,16 @@ def _to_datetime(strtime):
'content_md5':'Content-MD5',
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
+ 'account_admin_live_email_id': 'AccountAdminLiveEmailId',
+ 'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
+ 'subscription_id': 'SubscriptionID',
+ 'fqdn': 'FQDN',
+ 'private_id': 'PrivateID',
+ 'os_virtual_hard_disk': 'OSVirtualHardDisk',
+ 'logical_disk_size_in_gb':'LogicalDiskSizeInGB',
+ 'logical_size_in_gb':'LogicalSizeInGB',
+ 'os':'OS',
+ 'persistent_vm_downtime_info':'PersistentVMDowntimeInfo',
}
def _get_serialization_name(element_name):
@@ -186,6 +219,18 @@ def _int_or_none(value):
return str(int(value))
+def _bool_or_none(value):
+ if value is None:
+ return None
+
+ if isinstance(value, bool):
+ if value:
+ return 'true'
+ else:
+ return 'false'
+
+ return str(value)
+
def _convert_class_to_xml(source, xml_prefix = True):
if source is None:
return ''
@@ -242,11 +287,25 @@ def _clone_node_with_namespaces(node_to_clone, original_doc):
return clone
def _convert_response_to_feeds(response, convert_func):
- feeds = []
+ if response is None:
+ return None
+
+ feeds = _list_of(Feed)
+
+ x_ms_continuation = HeaderDict()
+ for name, value in response.headers:
+ if 'x-ms-continuation' in name:
+ x_ms_continuation[name[len('x-ms-continuation')+1:]] = value
+ if x_ms_continuation:
+ setattr(feeds, 'x_ms_continuation', x_ms_continuation)
+
xmldoc = minidom.parseString(response.body)
- for xml_entry in _get_children_from_path(xmldoc, 'feed', 'entry'):
+ xml_entries = _get_children_from_path(xmldoc, 'feed', 'entry')
+ if not xml_entries:
+ xml_entries = _get_children_from_path(xmldoc, 'entry') #in some cases, response contains only entry but no feed
+ for xml_entry in xml_entries:
new_node = _clone_node_with_namespaces(xml_entry, xmldoc)
- feeds.append(convert_func(new_node.toxml()))
+ feeds.append(convert_func(new_node.toxml('utf-8')))
return feeds
@@ -254,15 +313,74 @@ def _validate_not_none(param_name, param):
if param is None:
raise TypeError(_ERROR_VALUE_SHOULD_NOT_BE_NULL % (param_name))
-def _html_encode(html):
- ch_map = (('&', '&'), ('<', '<'), ('>', '>'), ('"', '"'), ('\'', '&apos'))
- for name, value in ch_map:
- html = html.replace(name, value)
- return html
+def _fill_list_of(xmldoc, element_type, xml_element_name):
+ xmlelements = _get_child_nodes(xmldoc, xml_element_name)
+ return [_parse_response_body_from_xml_node(xmlelement, element_type) for xmlelement in xmlelements]
+
+def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name, xml_element_name):
+ '''Converts an xml fragment into a list of scalar types. The parent xml element contains a
+ flat list of xml elements which are converted into the specified scalar type and added to the list.
+ Example:
+ xmldoc=
+    <Endpoints>
+        <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
+        <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>
+        <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>
+    </Endpoints>
+ element_type=str
+ parent_xml_element_name='Endpoints'
+ xml_element_name='Endpoint'
+ '''
+ xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
+ if xmlelements:
+ xmlelements = _get_child_nodes(xmlelements[0], xml_element_name)
+ return [_get_node_value(xmlelement, element_type) for xmlelement in xmlelements]
+
+def _fill_dict(xmldoc, element_name):
+ xmlelements = _get_child_nodes(xmldoc, element_name)
+ if xmlelements:
+ return_obj = {}
+ for child in xmlelements[0].childNodes:
+ if child.firstChild:
+ return_obj[child.nodeName] = child.firstChild.nodeValue
+ return return_obj
+
+def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name, key_xml_element_name, value_xml_element_name):
+ '''Converts an xml fragment into a dictionary. The parent xml element contains a
+ list of xml elements where each element has a child element for the key, and another for the value.
+ Example:
+ xmldoc=
+    <ExtendedProperties>
+        <ExtendedProperty>
+            <Name>Ext1</Name>
+            <Value>Val1</Value>
+        </ExtendedProperty>
+        <ExtendedProperty>
+            <Name>Ext2</Name>
+            <Value>Val2</Value>
+        </ExtendedProperty>
+    </ExtendedProperties>
+ element_type=str
+ parent_xml_element_name='ExtendedProperties'
+ pair_xml_element_name='ExtendedProperty'
+ key_xml_element_name='Name'
+ value_xml_element_name='Value'
+ '''
+ return_obj = { }
+
+ xmlelements = _get_child_nodes(xmldoc, parent_xml_element_name)
+ if xmlelements:
+ xmlelements = _get_child_nodes(xmlelements[0], pair_xml_element_name)
+ for pair in xmlelements:
+ keys = _get_child_nodes(pair, key_xml_element_name)
+ values = _get_child_nodes(pair, value_xml_element_name)
+ if keys and values:
+ key = str(keys[0].firstChild.nodeValue)
+ value = str(values[0].firstChild.nodeValue)
-def _fill_list_of(xmldoc, element_type):
- xmlelements = _get_child_nodes(xmldoc, element_type.__name__)
- return [_parse_response_body(xmlelement.toxml(), element_type) for xmlelement in xmlelements]
+ return_obj[key] = value
+
+ return return_obj
def _fill_instance_child(xmldoc, element_name, return_type):
'''Converts a child of the current dom element to the specified type. The child name
@@ -272,11 +390,14 @@ def _fill_instance_child(xmldoc, element_name, return_type):
if not xmlelements:
return None
- return _fill_instance_element(xmlelements[0], return_type)
+ return_obj = return_type()
+ _fill_data_to_return_object(xmlelements[0], return_obj)
+
+ return return_obj
def _fill_instance_element(element, return_type):
"""Converts a DOM element into the specified object"""
- return _parse_response_body(element.toxml(), return_type)
+ return _parse_response_body_from_xml_node(element, return_type)
def _fill_data_minidom(xmldoc, element_name, data_member):
@@ -296,6 +417,15 @@ def _fill_data_minidom(xmldoc, element_name, data_member):
else:
return type(data_member)(value)
+def _get_node_value(xmlelement, data_type):
+ value = xmlelement.firstChild.nodeValue
+ if data_type is datetime:
+ return _to_datetime(value)
+ elif data_type is types.BooleanType:
+ return value.lower() != 'false'
+ else:
+ return data_type(value)
+
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
@@ -365,32 +495,77 @@ def _parse_response(response, return_type):
'''
parse the HTTPResponse's body and fill all the data into a class of return_type
'''
- return _parse_response_body(response.body, return_type)
+ return _parse_response_body_from_xml_text(response.body, return_type)
+
+def _fill_data_to_return_object(node, return_obj):
+ members = dict(vars(return_obj))
+ for name, value in members.iteritems():
+ if isinstance(value, _list_of):
+ setattr(return_obj, name, _fill_list_of(node, value.list_type, value.xml_element_name))
+ elif isinstance(value, _scalar_list_of):
+ setattr(return_obj, name, _fill_scalar_list_of(node, value.list_type, _get_serialization_name(name), value.xml_element_name))
+ elif isinstance(value, _dict_of):
+ setattr(return_obj, name, _fill_dict_of(node, _get_serialization_name(name), value.pair_xml_element_name, value.key_xml_element_name, value.value_xml_element_name))
+ elif isinstance(value, WindowsAzureData):
+ setattr(return_obj, name, _fill_instance_child(node, name, value.__class__))
+ elif isinstance(value, dict):
+ setattr(return_obj, name, _fill_dict(node, _get_serialization_name(name)))
+ elif isinstance(value, _Base64String):
+ value = _fill_data_minidom(node, name, '')
+ if value is not None:
+ value = base64.b64decode(value)
+ #always set the attribute, so we don't end up returning an object with type _Base64String
+ setattr(return_obj, name, value)
+ else:
+ value = _fill_data_minidom(node, name, value)
+ if value is not None:
+ setattr(return_obj, name, value)
+
+def _parse_response_body_from_xml_node(node, return_type):
+ '''
+ parse the xml and fill all the data into a class of return_type
+ '''
+ return_obj = return_type()
+ _fill_data_to_return_object(node, return_obj)
+
+ return return_obj
-def _parse_response_body(respbody, return_type):
+def _parse_response_body_from_xml_text(respbody, return_type):
'''
parse the xml and fill all the data into a class of return_type
'''
doc = minidom.parseString(respbody)
return_obj = return_type()
for node in _get_child_nodes(doc, return_type.__name__):
- for name, value in vars(return_obj).iteritems():
- if isinstance(value, _list_of):
- setattr(return_obj, name, _fill_list_of(node, value.list_type))
- elif isinstance(value, WindowsAzureData):
- setattr(return_obj, name, _fill_instance_child(node, name, value.__class__))
- else:
- value = _fill_data_minidom(node, name, value)
- if value is not None:
- setattr(return_obj, name, value)
+ _fill_data_to_return_object(node, return_obj)
return return_obj
+class _dict_of(dict):
+ """a dict which carries with it the xml element names for key,val.
+    Used for deserialization and construction of the lists"""
+ def __init__(self, pair_xml_element_name, key_xml_element_name, value_xml_element_name):
+ self.pair_xml_element_name = pair_xml_element_name
+ self.key_xml_element_name = key_xml_element_name
+ self.value_xml_element_name = value_xml_element_name
+
class _list_of(list):
"""a list which carries with it the type that's expected to go in it.
Used for deserializaion and construction of the lists"""
- def __init__(self, list_type):
+ def __init__(self, list_type, xml_element_name=None):
+ self.list_type = list_type
+ if xml_element_name is None:
+ self.xml_element_name = list_type.__name__
+ else:
+ self.xml_element_name = xml_element_name
+
+class _scalar_list_of(list):
+ """a list of scalar types which carries with it the type that's
+ expected to go in it along with its xml element name.
+    Used for deserialization and construction of the lists"""
+ def __init__(self, list_type, xml_element_name):
self.list_type = list_type
+ self.xml_element_name = xml_element_name
def _update_request_uri_query_local_storage(request, use_local_storage):
''' create correct uri and query for the request '''
@@ -406,16 +581,12 @@ def _update_request_uri_query(request):
appear after the existing parameters'''
if '?' in request.path:
- pos = request.path.find('?')
- query_string = request.path[pos+1:]
- request.path = request.path[:pos]
+ request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
- pos = query.find('=')
- name = query[:pos]
- value = query[pos+1:]
+ name, _, value = query.partition('=')
request.query.append((name, value))
request.path = urllib2.quote(request.path, '/()$=\',')
@@ -443,14 +614,27 @@ def _dont_fail_not_exist(error):
return False
else:
raise error
+
+def _general_error_handler(http_error):
+ ''' Simple error handler for azure.'''
+ if http_error.status == 409:
+ raise WindowsAzureConflictError(_ERROR_CONFLICT)
+ elif http_error.status == 404:
+ raise WindowsAzureMissingResourceError(_ERROR_NOT_FOUND)
+ else:
+ if http_error.respbody is not None:
+ raise WindowsAzureError(_ERROR_UNKNOWN % http_error.message + '\n' + http_error.respbody)
+ else:
+ raise WindowsAzureError(_ERROR_UNKNOWN % http_error.message)
def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard http headers.'''
-
+
+ if response is None:
+ return None
http_headers = ['server', 'date', 'location', 'host',
- 'via', 'proxy-connection', 'x-ms-version', 'connection',
- 'content-length']
- return_dict = {}
+ 'via', 'proxy-connection', 'connection']
+ return_dict = HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
@@ -461,6 +645,8 @@ def _parse_response_for_dict(response):
def _parse_response_for_dict_prefix(response, prefix):
''' Extracts name-values for names starting with prefix from response header. Filter out the standard http headers.'''
+ if response is None:
+ return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
@@ -475,6 +661,8 @@ def _parse_response_for_dict_prefix(response, prefix):
def _parse_response_for_dict_filter(response, filter):
''' Extracts name-values for names in filter from response header. Filter out the standard http headers.'''
+ if response is None:
+ return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
@@ -484,23 +672,3 @@ def _parse_response_for_dict_filter(response, filter):
return return_dict
else:
return None
-
-def _get_table_host(account_name, use_local_storage=False):
- ''' Gets service host base on the service type and whether it is using local storage. '''
-
- if use_local_storage:
- return DEV_TABLE_HOST
- else:
- return account_name + TABLE_SERVICE_HOST_BASE
-
-def _get_queue_host(account_name, use_local_storage=False):
- if use_local_storage:
- return DEV_QUEUE_HOST
- else:
- return account_name + QUEUE_SERVICE_HOST_BASE
-
-def _get_blob_host(account_name, use_local_storage=False):
- if use_local_storage:
- return DEV_BLOB_HOST
- else:
- return account_name + BLOB_SERVICE_HOST_BASE
diff --git a/src/azure/http/__init__.py b/src/azure/http/__init__.py
index 3a2dfc515a6f..92ccaeb87fbb 100644
--- a/src/azure/http/__init__.py
+++ b/src/azure/http/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
# limitations under the License.
#--------------------------------------------------------------------------
+HTTP_RESPONSE_NO_CONTENT = 204
class HTTPError(Exception):
''' HTTP Exception when response status code >= 300 '''
diff --git a/src/azure/http/batchclient.py b/src/azure/http/batchclient.py
index f0eca01564d0..66f3085208fe 100644
--- a/src/azure/http/batchclient.py
+++ b/src/azure/http/batchclient.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,8 +28,8 @@ class _BatchClient(_HTTPClient):
It only supports one changeset.
'''
- def __init__(self, service_instance, account_key, account_name, x_ms_version=None, protocol='http'):
- _HTTPClient.__init__(self, service_instance, account_name=account_name, account_key=account_key, x_ms_version=x_ms_version, protocol=protocol)
+ def __init__(self, service_instance, account_key, account_name, protocol='http'):
+ _HTTPClient.__init__(self, service_instance, account_name=account_name, account_key=account_key, protocol=protocol)
self.is_batch = False
self.batch_requests = []
self.batch_table = ''
diff --git a/src/azure/http/httpclient.py b/src/azure/http/httpclient.py
index fb572592cb72..3cc85c7e693e 100644
--- a/src/azure/http/httpclient.py
+++ b/src/azure/http/httpclient.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -26,13 +26,14 @@
from xml.dom import minidom
from azure.http import HTTPError, HTTPResponse
+from azure import _USER_AGENT_STRING
class _HTTPClient:
'''
Takes the request and sends it to cloud service and returns the response.
'''
- def __init__(self, service_instance, cert_file=None, account_name=None, account_key=None, service_namespace=None, issuer=None, x_ms_version=None, protocol='https'):
+ def __init__(self, service_instance, cert_file=None, account_name=None, account_key=None, service_namespace=None, issuer=None, protocol='https'):
'''
service_instance: service client instance.
cert_file: certificate file name/location. This is only used in hosted service management.
@@ -40,7 +41,6 @@ def __init__(self, service_instance, cert_file=None, account_name=None, account_
account_key: the storage account access key for storage services or servicebus access key for service bus service.
service_namespace: the service namespace for service bus.
issuer: the issuer for service bus service.
- x_ms_version: the x_ms_version for the service.
'''
self.service_instance = service_instance
self.status = None
@@ -51,8 +51,14 @@ def __init__(self, service_instance, cert_file=None, account_name=None, account_
self.account_key = account_key
self.service_namespace = service_namespace
self.issuer = issuer
- self.x_ms_version = x_ms_version
self.protocol = protocol
+ self.proxy_host = None
+ self.proxy_port = None
+
+ def set_proxy(self, host, port):
+ '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ self.proxy_host = host
+ self.proxy_port = port
def get_connection(self, request):
''' Create connection for the request. '''
@@ -67,12 +73,17 @@ def get_connection(self, request):
_connection = httplib.HTTPConnection(request.host)
else:
_connection = httplib.HTTPSConnection(request.host, cert_file=self.cert_file)
+
+ if self.proxy_host:
+ _connection.set_tunnel(self.proxy_host, self.proxy_port)
+
return _connection
def send_request_headers(self, connection, request_headers):
for name, value in request_headers:
if value:
connection.putheader(name, value)
+ connection.putheader('User-Agent', _USER_AGENT_STRING)
connection.endheaders()
def send_request_body(self, connection, request_body):
diff --git a/src/azure/http/winhttp.py b/src/azure/http/winhttp.py
index f67f6de49ee0..139654f9cf53 100644
--- a/src/azure/http/winhttp.py
+++ b/src/azure/http/winhttp.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
-from ctypes import c_void_p, c_long, c_ulong, c_longlong, c_ulonglong, c_short, c_ushort, c_wchar_p, c_byte
+from ctypes import c_void_p, c_long, c_ulong, c_longlong, c_ulonglong, c_short, c_ushort, c_wchar_p, c_byte, c_size_t
from ctypes import byref, Structure, Union, POINTER, WINFUNCTYPE, HRESULT, oledll, WinDLL, cast, create_string_buffer
import ctypes
import urllib2
@@ -33,6 +33,8 @@
VT_UI8 = 21
VT_ARRAY = 8192
+HTTPREQUEST_PROXYSETTING_PROXY = 2
+
HTTPREQUEST_PROXY_SETTING = c_long
HTTPREQUEST_SETCREDENTIALS_FLAGS = c_long
#------------------------------------------------------------------------------
@@ -41,11 +43,24 @@
_oleaut32 = WinDLL('oleaut32')
_CLSIDFromString = _ole32.CLSIDFromString
_CoInitialize = _ole32.CoInitialize
+_CoInitialize.argtypes = [c_void_p]
+
_CoCreateInstance = _ole32.CoCreateInstance
+
_SysAllocString = _oleaut32.SysAllocString
+_SysAllocString.restype = c_void_p
+_SysAllocString.argtypes = [c_wchar_p]
+
_SysFreeString = _oleaut32.SysFreeString
-_SafeArrayDestroy = _oleaut32.SafeArrayDestroy
+_SysFreeString.argtypes = [c_void_p]
+
_CoTaskMemAlloc = _ole32.CoTaskMemAlloc
+_CoTaskMemAlloc.restype = c_void_p
+_CoTaskMemAlloc.argtypes = [c_size_t]
+
+_CoTaskMemFree = _ole32.CoTaskMemFree
+_CoTaskMemFree.argtypes = [c_void_p]
+
#------------------------------------------------------------------------------
class BSTR(c_wchar_p):
@@ -75,8 +90,7 @@ class _tagSAFEARRAYBOUND(Structure):
('rgsabound', _tagSAFEARRAYBOUND*1)]
def __del__(self):
- _SafeArrayDestroy(self.pvdata)
- pass
+ _CoTaskMemFree(self.pvdata)
class VARIANT(Structure):
'''
@@ -215,17 +229,7 @@ def status_text(self):
status_text = bstr_status_text.value
_SysFreeString(bstr_status_text)
return status_text
-
- def response_text(self):
- ''' Gets response body as text. '''
-
- bstr_resptext = c_void_p()
- _WinHttpRequest._ResponseText(self, byref(bstr_resptext))
- bstr_resptext = ctypes.cast(bstr_resptext, c_wchar_p)
- resptext = bstr_resptext.value
- _SysFreeString(bstr_resptext)
- return resptext
-
+
def response_body(self):
'''
Gets response body as a SAFEARRAY and converts the SAFEARRAY to str. If it is an xml
@@ -248,6 +252,24 @@ def set_client_certificate(self, certificate):
_certificate = BSTR(certificate)
_WinHttpRequest._SetClientCertificate(self, _certificate)
+ def set_tunnel(self, host, port):
+ ''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''
+ url = host
+ if port:
+ url = url + u':' + port
+
+ var_host = VARIANT()
+ var_host.vt = VT_BSTR
+ var_host.vdata.bstrval = BSTR(url)
+
+ var_empty = VARIANT()
+ var_empty.vt = VT_EMPTY
+ var_empty.vdata.llval = 0
+
+ _WinHttpRequest._SetProxy(self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)
+
+ _SysFreeString(var_host.vdata.bstrval)
+
def __del__(self):
if self.value is not None:
_WinHttpRequest._Release(self)
@@ -283,9 +305,13 @@ def __init__(self, host, cert_file=None, key_file=None, protocol='http'):
self.protocol = protocol
clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')
iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')
- _CoInitialize(0)
+ _CoInitialize(None)
_CoCreateInstance(byref(clsid), 0, 1, byref(iid), byref(self._httprequest))
+ def set_tunnel(self, host, port=None):
+ ''' Sets up the host and the port for the HTTP CONNECT Tunnelling. '''
+        self._httprequest.set_tunnel(unicode(host), unicode(str(port)) if port is not None else None)
+
def putrequest(self, method, uri):
''' Connects to host and sends the request. '''
@@ -330,13 +356,9 @@ def getresponse(self):
for resp_header in fixed_headers:
if ':' in resp_header:
pos = resp_header.find(':')
- headers.append((resp_header[:pos], resp_header[pos+1:].strip()))
+ headers.append((resp_header[:pos].lower(), resp_header[pos+1:].strip()))
body = self._httprequest.response_body()
length = len(body)
return _Response(status, status_text, length, headers, body)
-
-
-
-
diff --git a/src/azure/servicebus/__init__.py b/src/azure/servicebus/__init__.py
index a0a959bae615..a42a19a99c5d 100644
--- a/src/azure/servicebus/__init__.py
+++ b/src/azure/servicebus/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,11 +22,12 @@
from azure.http import HTTPError
-from azure import (WindowsAzureError, WindowsAzureData,
- _create_entry, _get_entry_properties, _html_encode,
+from azure import (WindowsAzureError, WindowsAzureData, _general_error_handler,
+ _create_entry, _get_entry_properties, xml_escape,
_get_child_nodes, WindowsAzureMissingResourceError,
WindowsAzureConflictError, _get_serialization_name,
- _get_children_from_path)
+ _get_children_from_path, _get_first_child_node_value,
+ _USER_AGENT_STRING)
import azure
#default rule name for subscription
@@ -47,52 +48,90 @@
class Queue(WindowsAzureData):
''' Queue class corresponding to Queue Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780773'''
- def __init__(self):
- self.lock_duration = None
- self.max_size_in_megabytes = None
- self.duplicate_detection = None
- self.requires_duplicate_detection = None
- self.requires_session = None
- self.default_message_time_to_live = None
- self.enable_dead_lettering_on_message_expiration = None
- self.duplicate_detection_history_time_window = None
- self.max_delivery_count = None
- self.enable_batched_operations = None
- self.size_in_bytes = None
- self.message_count = None
+ def __init__(self,
+ lock_duration=None,
+ max_size_in_megabytes=None,
+ requires_duplicate_detection=None,
+ requires_session=None,
+ default_message_time_to_live=None,
+ dead_lettering_on_message_expiration=None,
+ duplicate_detection_history_time_window=None,
+ max_delivery_count=None,
+ enable_batched_operations=None,
+ size_in_bytes=None,
+ message_count=None):
+
+ self.lock_duration = lock_duration
+ self.max_size_in_megabytes = max_size_in_megabytes
+ self.requires_duplicate_detection = requires_duplicate_detection
+ self.requires_session = requires_session
+ self.default_message_time_to_live = default_message_time_to_live
+ self.dead_lettering_on_message_expiration = dead_lettering_on_message_expiration
+ self.duplicate_detection_history_time_window = duplicate_detection_history_time_window
+ self.max_delivery_count = max_delivery_count
+ self.enable_batched_operations = enable_batched_operations
+ self.size_in_bytes = size_in_bytes
+ self.message_count = message_count
class Topic(WindowsAzureData):
''' Topic class corresponding to Topic Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780749. '''
- def __init__(self):
- self.default_message_time_to_live = None
- self.max_size_in_mega_bytes = None
- self.requires_duplicate_detection = None
- self.duplicate_detection_history_time_window = None
- self.enable_batched_operations = None
- self.size_in_bytes = None
+ def __init__(self,
+ default_message_time_to_live=None,
+ max_size_in_megabytes=None,
+ requires_duplicate_detection=None,
+ duplicate_detection_history_time_window=None,
+ enable_batched_operations=None,
+ size_in_bytes=None):
+
+ self.default_message_time_to_live = default_message_time_to_live
+ self.max_size_in_megabytes = max_size_in_megabytes
+ self.requires_duplicate_detection = requires_duplicate_detection
+ self.duplicate_detection_history_time_window = duplicate_detection_history_time_window
+ self.enable_batched_operations = enable_batched_operations
+ self.size_in_bytes = size_in_bytes
+
+ @property
+ def max_size_in_mega_bytes(self):
+ import warnings
+ warnings.warn('This attribute has been changed to max_size_in_megabytes.')
+ return self.max_size_in_megabytes
+
+ @max_size_in_mega_bytes.setter
+ def max_size_in_mega_bytes(self, value):
+ self.max_size_in_megabytes = value
+
class Subscription(WindowsAzureData):
''' Subscription class corresponding to Subscription Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780763. '''
- def __init__(self):
- self.lock_duration = None
- self.requires_session = None
- self.default_message_time_to_live = None
- self.dead_lettering_on_message_expiration = None
- self.dead_lettering_on_filter_evaluation_exceptions = None
- self.enable_batched_operations = None
- self.max_delivery_count = None
- self.message_count = None
+ def __init__(self,
+ lock_duration=None,
+ requires_session=None,
+ default_message_time_to_live=None,
+ dead_lettering_on_message_expiration=None,
+ dead_lettering_on_filter_evaluation_exceptions=None,
+ enable_batched_operations=None,
+ max_delivery_count=None,
+ message_count=None):
+
+ self.lock_duration = lock_duration
+ self.requires_session = requires_session
+ self.default_message_time_to_live = default_message_time_to_live
+ self.dead_lettering_on_message_expiration = dead_lettering_on_message_expiration
+ self.dead_lettering_on_filter_evaluation_exceptions = dead_lettering_on_filter_evaluation_exceptions
+ self.enable_batched_operations = enable_batched_operations
+ self.max_delivery_count = max_delivery_count
+ self.message_count = message_count
class Rule(WindowsAzureData):
''' Rule class corresponding to Rule Description: http://msdn.microsoft.com/en-us/library/windowsazure/hh780753. '''
- def __init__(self):
- self.filter_type = ''
- self.filter_expression = ''
- self.action_type = ''
- self.action_expression = ''
+ def __init__(self, filter_type=None, filter_expression=None, action_type=None, action_expression=None):
+ self.filter_type = filter_type
+ self.filter_expression = filter_expression
+ self.action_type = action_type
+        self.action_expression = action_expression
class Message(WindowsAzureData):
''' Message class that used in send message/get mesage apis. '''
@@ -156,7 +195,7 @@ def add_headers(self, request):
elif isinstance(value, datetime):
request.headers.append((name, '"' + value.strftime('%a, %d %b %Y %H:%M:%S GMT') + '"'))
else:
- request.headers.append((name, str(value)))
+ request.headers.append((name, str(value).lower()))
# Adds content-type
request.headers.append(('Content-Type', self.type))
@@ -229,6 +268,7 @@ def _get_token(request, account_key, issuer):
connection = httplib.HTTPSConnection(host)
connection.putrequest('POST', '/WRAPv0.9')
connection.putheader('Content-Length', len(request_body))
+ connection.putheader('User-Agent', _USER_AGENT_STRING)
connection.endheaders()
connection.send(request_body)
resp = connection.getresponse()
@@ -268,11 +308,23 @@ def _create_message(response, service_instance):
message_location = value
elif name.lower() not in ['content-type', 'brokerproperties', 'transfer-encoding', 'server', 'location', 'date']:
if '"' in value:
- custom_properties[name] = value[1:-1]
- else:
- custom_properties[name] = value
+ value = value[1:-1]
+ try:
+ custom_properties[name] = datetime.strptime(value, '%a, %d %b %Y %H:%M:%S GMT')
+ except ValueError:
+ custom_properties[name] = value
+ else: #only int, float or boolean
+ if value.lower() == 'true':
+ custom_properties[name] = True
+ elif value.lower() == 'false':
+ custom_properties[name] = False
+ elif str(int(float(value))) == value: #int('3.1') doesn't work so need to get float('3.14') first
+ custom_properties[name] = int(value)
+ else:
+ custom_properties[name] = float(value)
+
if message_type == None:
- message = Message(respbody, service_instance, message_location, custom_properties, broker_properties)
+ message = Message(respbody, service_instance, message_location, custom_properties, 'application/atom+xml;type=entry;charset=utf-8', broker_properties)
else:
message = Message(respbody, service_instance, message_location, custom_properties, message_type, broker_properties)
return message
@@ -332,18 +384,6 @@ def _parse_bool(value):
return True
return False
-
-_QUEUE_CONVERSION = {
- 'MaxSizeInMegaBytes': int,
- 'RequiresGroupedReceives': _parse_bool,
- 'SupportsDuplicateDetection': _parse_bool,
- 'SizeinBytes': int,
- 'MessageCount': int,
- 'EnableBatchedOperations': _parse_bool,
- 'RequiresSession': _parse_bool,
- 'LockDuration': int,
-}
-
def _convert_xml_to_queue(xmlstr):
''' Converts xml response to queue object.
@@ -363,18 +403,51 @@ def _convert_xml_to_queue(xmlstr):
invalid_queue = True
#get node for each attribute in Queue class, if nothing found then the response is not valid xml for Queue.
- for queue_desc in _get_children_from_path(xmldoc, 'entry', 'content', 'QueueDescription'):
- for attr_name, attr_value in vars(queue).iteritems():
- xml_attrs = _get_child_nodes(queue_desc, _get_serialization_name(attr_name))
- if xml_attrs:
- xml_attr = xml_attrs[0]
- if xml_attr.firstChild:
- value = xml_attr.firstChild.nodeValue
- conversion = _QUEUE_CONVERSION.get(attr_name)
- if conversion is not None:
- value = conversion(value)
- setattr(queue, attr_name, value)
- invalid_queue = False
+ for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'QueueDescription'):
+ node_value = _get_first_child_node_value(desc, 'LockDuration')
+ if node_value is not None:
+ queue.lock_duration = node_value
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')
+ if node_value is not None:
+ queue.max_size_in_megabytes = int(node_value)
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'RequiresDuplicateDetection')
+ if node_value is not None:
+ queue.requires_duplicate_detection = _parse_bool(node_value)
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'RequiresSession')
+ if node_value is not None:
+ queue.requires_session = _parse_bool(node_value)
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'DefaultMessageTimeToLive')
+ if node_value is not None:
+ queue.default_message_time_to_live = node_value
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'DeadLetteringOnMessageExpiration')
+ if node_value is not None:
+ queue.dead_lettering_on_message_expiration = _parse_bool(node_value)
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'DuplicateDetectionHistoryTimeWindow')
+ if node_value is not None:
+ queue.duplicate_detection_history_time_window = node_value
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'EnableBatchedOperations')
+ if node_value is not None:
+ queue.enable_batched_operations = _parse_bool(node_value)
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'MaxDeliveryCount')
+ if node_value is not None:
+ queue.max_delivery_count = int(node_value)
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'MessageCount')
+ if node_value is not None:
+ queue.message_count = int(node_value)
+ invalid_queue = False
+ node_value = _get_first_child_node_value(desc, 'SizeInBytes')
+ if node_value is not None:
+ queue.size_in_bytes = int(node_value)
+ invalid_queue = False
if invalid_queue:
raise WindowsAzureError(azure._ERROR_QUEUE_NOT_FOUND)
@@ -388,12 +461,6 @@ def _convert_xml_to_queue(xmlstr):
def _convert_response_to_topic(response):
return _convert_xml_to_topic(response.body)
-_TOPIC_CONVERSION = {
- 'MaxSizeInMegaBytes': int,
- 'RequiresDuplicateDetection': _parse_bool,
- 'DeadLetteringOnFilterEvaluationExceptions': _parse_bool
-}
-
def _convert_xml_to_topic(xmlstr):
'''Converts xml response to topic
@@ -402,7 +469,7 @@ def _convert_xml_to_topic(xmlstr):
P10675199DT2H48M5.4775807S
- 1024
+ 1024
false
P7D
true
@@ -414,20 +481,34 @@ def _convert_xml_to_topic(xmlstr):
topic = Topic()
invalid_topic = True
+
#get node for each attribute in Topic class, if nothing found then the response is not valid xml for Topic.
for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'TopicDescription'):
invalid_topic = True
- for attr_name, attr_value in vars(topic).iteritems():
- xml_attrs = _get_child_nodes(desc, _get_serialization_name(attr_name))
- if xml_attrs:
- xml_attr = xml_attrs[0]
- if xml_attr.firstChild:
- value = xml_attr.firstChild.nodeValue
- conversion = _TOPIC_CONVERSION.get(attr_name)
- if conversion is not None:
- value = conversion(value)
- setattr(topic, attr_name, value)
- invalid_topic = False
+ node_value = _get_first_child_node_value(desc, 'DefaultMessageTimeToLive')
+ if node_value is not None:
+ topic.default_message_time_to_live = node_value
+ invalid_topic = False
+ node_value = _get_first_child_node_value(desc, 'MaxSizeInMegabytes')
+ if node_value is not None:
+ topic.max_size_in_megabytes = int(node_value)
+ invalid_topic = False
+ node_value = _get_first_child_node_value(desc, 'RequiresDuplicateDetection')
+ if node_value is not None:
+ topic.requires_duplicate_detection = _parse_bool(node_value)
+ invalid_topic = False
+ node_value = _get_first_child_node_value(desc, 'DuplicateDetectionHistoryTimeWindow')
+ if node_value is not None:
+ topic.duplicate_detection_history_time_window = node_value
+ invalid_topic = False
+ node_value = _get_first_child_node_value(desc, 'EnableBatchedOperations')
+ if node_value is not None:
+ topic.enable_batched_operations = _parse_bool(node_value)
+ invalid_topic = False
+ node_value = _get_first_child_node_value(desc, 'SizeInBytes')
+ if node_value is not None:
+ topic.size_in_bytes = int(node_value)
+ invalid_topic = False
if invalid_topic:
raise WindowsAzureError(azure._ERROR_TOPIC_NOT_FOUND)
@@ -440,15 +521,6 @@ def _convert_xml_to_topic(xmlstr):
def _convert_response_to_subscription(response):
return _convert_xml_to_subscription(response.body)
-_SUBSCRIPTION_CONVERSION = {
- 'RequiresSession' : _parse_bool,
- 'DeadLetteringOnMessageExpiration': _parse_bool,
- 'DefaultMessageTimeToLive': int,
- 'EnableBatchedOperations': _parse_bool,
- 'MaxDeliveryCount': int,
- 'MessageCount': int,
-}
-
def _convert_xml_to_subscription(xmlstr):
'''Converts xml response to subscription
@@ -467,18 +539,31 @@ def _convert_xml_to_subscription(xmlstr):
xmldoc = minidom.parseString(xmlstr)
subscription = Subscription()
- for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'subscriptiondescription'):
- for attr_name, attr_value in vars(subscription).iteritems():
- tag_name = attr_name.replace('_', '')
- xml_attrs = _get_child_nodes(desc, tag_name)
- if xml_attrs:
- xml_attr = xml_attrs[0]
- if xml_attr.firstChild:
- value = xml_attr.firstChild.nodeValue
- conversion = _SUBSCRIPTION_CONVERSION.get(attr_name)
- if conversion is not None:
- value = conversion(value)
- setattr(subscription, attr_name, value)
+ for desc in _get_children_from_path(xmldoc, 'entry', 'content', 'SubscriptionDescription'):
+ node_value = _get_first_child_node_value(desc, 'LockDuration')
+ if node_value is not None:
+ subscription.lock_duration = node_value
+ node_value = _get_first_child_node_value(desc, 'RequiresSession')
+ if node_value is not None:
+ subscription.requires_session = _parse_bool(node_value)
+ node_value = _get_first_child_node_value(desc, 'DefaultMessageTimeToLive')
+ if node_value is not None:
+ subscription.default_message_time_to_live = node_value
+ node_value = _get_first_child_node_value(desc, 'DeadLetteringOnFilterEvaluationExceptions')
+ if node_value is not None:
+ subscription.dead_lettering_on_filter_evaluation_exceptions = _parse_bool(node_value)
+ node_value = _get_first_child_node_value(desc, 'DeadLetteringOnMessageExpiration')
+ if node_value is not None:
+ subscription.dead_lettering_on_message_expiration = _parse_bool(node_value)
+ node_value = _get_first_child_node_value(desc, 'EnableBatchedOperations')
+ if node_value is not None:
+ subscription.enable_batched_operations = _parse_bool(node_value)
+ node_value = _get_first_child_node_value(desc, 'MaxDeliveryCount')
+ if node_value is not None:
+ subscription.max_delivery_count = int(node_value)
+ node_value = _get_first_child_node_value(desc, 'MessageCount')
+ if node_value is not None:
+ subscription.message_count = int(node_value)
for name, value in _get_entry_properties(xmlstr, True).iteritems():
setattr(subscription, name, value)
@@ -496,21 +581,21 @@ def convert_subscription_to_xml(subscription):
subscription_body = ''
if subscription:
if subscription.lock_duration is not None:
- subscription_body += ''.join(['', subscription.lock_duration, ''])
+ subscription_body += ''.join(['', str(subscription.lock_duration), ''])
if subscription.requires_session is not None:
- subscription_body += ''.join(['', subscription.requires_session, ''])
+ subscription_body += ''.join(['', str(subscription.requires_session).lower(), ''])
if subscription.default_message_time_to_live is not None:
- subscription_body += ''.join(['', subscription.default_message_time_to_live, ''])
+ subscription_body += ''.join(['', str(subscription.default_message_time_to_live), ''])
if subscription.dead_lettering_on_message_expiration is not None:
- subscription_body += ''.join(['', subscription.dead_lettering_on_message_expiration, ''])
+ subscription_body += ''.join(['', str(subscription.dead_lettering_on_message_expiration).lower(), ''])
if subscription.dead_lettering_on_filter_evaluation_exceptions is not None:
- subscription_body += ''.join(['', subscription.dead_lettering_on_filter_evaluation_exceptions, ''])
+ subscription_body += ''.join(['', str(subscription.dead_lettering_on_filter_evaluation_exceptions).lower(), ''])
if subscription.enable_batched_operations is not None:
- subscription_body += ''.join(['', subscription.enable_batched_operations, ''])
+ subscription_body += ''.join(['', str(subscription.enable_batched_operations).lower(), ''])
if subscription.max_delivery_count is not None:
- subscription_body += ''.join(['', subscription.max_delivery_count, ''])
+ subscription_body += ''.join(['', str(subscription.max_delivery_count), ''])
if subscription.message_count is not None:
- subscription_body += ''.join(['', subscription.message_count, ''])
+ subscription_body += ''.join(['', str(subscription.message_count), ''])
subscription_body += ''
return _create_entry(subscription_body)
@@ -525,17 +610,18 @@ def convert_rule_to_xml(rule):
rule_body = ''
if rule:
if rule.filter_type:
- rule_body += ''.join([''])
+ rule_body += ''.join([''])
if rule.filter_type == 'CorrelationFilter':
- rule_body += ''.join(['', _html_encode(rule.filter_expression), ''])
+ rule_body += ''.join(['', xml_escape(rule.filter_expression), ''])
else:
- rule_body += ''.join(['', _html_encode(rule.filter_expression), ''])
+ rule_body += ''.join(['', xml_escape(rule.filter_expression), ''])
rule_body += '20'
rule_body += ''
if rule.action_type:
- rule_body += ''.join([''])
- if rule.action_type == 'SqlFilterAction':
- rule_body += ''.join(['', _html_encode(rule.action_expression), ''])
+ rule_body += ''.join([''])
+ if rule.action_type == 'SqlRuleAction':
+ rule_body += ''.join(['', xml_escape(rule.action_expression), ''])
+ rule_body += '20'
rule_body += ''
rule_body += ''
@@ -553,16 +639,16 @@ def convert_topic_to_xml(topic):
if topic:
if topic.default_message_time_to_live is not None:
topic_body += ''.join(['', str(topic.default_message_time_to_live), ''])
- if topic.max_size_in_mega_bytes is not None:
+ if topic.max_size_in_megabytes is not None:
topic_body += ''.join(['', str(topic.max_size_in_megabytes), ''])
if topic.requires_duplicate_detection is not None:
- topic_body += ''.join(['', str(topic.requires_duplicate_detection), ''])
+ topic_body += ''.join(['', str(topic.requires_duplicate_detection).lower(), ''])
if topic.duplicate_detection_history_time_window is not None:
topic_body += ''.join(['', str(topic.duplicate_detection_history_time_window), ''])
if topic.enable_batched_operations is not None:
- topic_body += ''.join(['', str(topic.enable_batched_operations), ''])
+ topic_body += ''.join(['', str(topic.enable_batched_operations).lower(), ''])
if topic.size_in_bytes is not None:
- topic_body += ''.join(['', str(topic.size_in_bytes), ''])
+ topic_body += ''.join(['', str(topic.size_in_bytes), ''])
topic_body += ''
return _create_entry(topic_body)
@@ -581,21 +667,21 @@ def convert_queue_to_xml(queue):
if queue.max_size_in_megabytes is not None:
queue_body += ''.join(['', str(queue.max_size_in_megabytes), ''])
if queue.requires_duplicate_detection is not None:
- queue_body += ''.join(['', str(queue.requires_duplicate_detection), ''])
+ queue_body += ''.join(['', str(queue.requires_duplicate_detection).lower(), ''])
if queue.requires_session is not None:
- queue_body += ''.join(['', str(queue.requires_session), ''])
+ queue_body += ''.join(['', str(queue.requires_session).lower(), ''])
if queue.default_message_time_to_live is not None:
queue_body += ''.join(['', str(queue.default_message_time_to_live), ''])
- if queue.enable_dead_lettering_on_message_expiration is not None:
- queue_body += ''.join(['', str(queue.enable_dead_lettering_on_message_expiration), ''])
+ if queue.dead_lettering_on_message_expiration is not None:
+ queue_body += ''.join(['', str(queue.dead_lettering_on_message_expiration).lower(), ''])
if queue.duplicate_detection_history_time_window is not None:
queue_body += ''.join(['', str(queue.duplicate_detection_history_time_window), ''])
if queue.max_delivery_count is not None:
queue_body += ''.join(['', str(queue.max_delivery_count), ''])
if queue.enable_batched_operations is not None:
- queue_body += ''.join(['', str(queue.enable_batched_operations), ''])
+ queue_body += ''.join(['', str(queue.enable_batched_operations).lower(), ''])
if queue.size_in_bytes is not None:
- queue_body += ''.join(['', str(queue.size_in_bytes), ''])
+ queue_body += ''.join(['', str(queue.size_in_bytes), ''])
if queue.message_count is not None:
queue_body += ''.join(['', str(queue.message_count), ''])
@@ -604,12 +690,6 @@ def convert_queue_to_xml(queue):
def _service_bus_error_handler(http_error):
''' Simple error handler for service bus service. Will add more specific cases '''
-
- if http_error.status == 409:
- raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
- elif http_error.status == 404:
- raise WindowsAzureMissingResourceError(azure._ERROR_NOT_FOUND)
- else:
- raise WindowsAzureError(azure._ERROR_UNKNOWN % http_error.message)
+ return _general_error_handler(http_error)
from azure.servicebus.servicebusservice import ServiceBusService
diff --git a/src/azure/servicebus/servicebusservice.py b/src/azure/servicebus/servicebusservice.py
index 6694a6b0ef3f..81f6ed1bba8b 100644
--- a/src/azure/servicebus/servicebusservice.py
+++ b/src/azure/servicebus/servicebusservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
import urllib2
from azure.http.httpclient import _HTTPClient
-from azure.http import HTTPError
+from azure.http import HTTPError, HTTP_RESPONSE_NO_CONTENT
from azure.servicebus import (_update_service_bus_header, _create_message,
convert_topic_to_xml, _convert_response_to_topic,
convert_queue_to_xml, _convert_response_to_queue,
@@ -27,16 +27,15 @@
_convert_xml_to_subscription, _convert_xml_to_rule,
_service_bus_error_handler, AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY, AZURE_SERVICEBUS_ISSUER)
-from azure.http import HTTPRequest
+from azure.http import HTTPRequest
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist,
+ _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class ServiceBusService:
@@ -52,7 +51,7 @@ def create_queue(self, queue_name, queue=None, fail_on_exist=False):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.body = _get_request_body(convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
@@ -78,7 +77,7 @@ def delete_queue(self, queue_name, fail_not_exist=False):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -102,7 +101,7 @@ def get_queue(self, queue_name):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -116,7 +115,7 @@ def list_queues(self):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -135,7 +134,7 @@ def create_topic(self, topic_name, topic=None, fail_on_exist=False):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + ''
request.body = _get_request_body(convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
@@ -162,7 +161,7 @@ def delete_topic(self, topic_name, fail_not_exist=False):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -186,7 +185,7 @@ def get_topic(self, topic_name):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -200,7 +199,7 @@ def list_topics(self):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -222,7 +221,7 @@ def create_rule(self, topic_name, subscription_name, rule_name, rule=None, fail_
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.body = _get_request_body(convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
@@ -253,7 +252,7 @@ def delete_rule(self, topic_name, subscription_name, rule_name, fail_not_exist=F
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -281,7 +280,7 @@ def get_rule(self, topic_name, subscription_name, rule_name):
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/' + str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -300,7 +299,7 @@ def list_rules(self, topic_name, subscription_name):
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -321,7 +320,7 @@ def create_subscription(self, topic_name, subscription_name, subscription=None,
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.body = _get_request_body(convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
@@ -349,7 +348,7 @@ def delete_subscription(self, topic_name, subscription_name, fail_not_exist=Fals
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -375,7 +374,7 @@ def get_subscription(self, topic_name, subscription_name):
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -392,7 +391,7 @@ def list_subscriptions(self, topic_name):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -413,7 +412,7 @@ def send_topic_message(self, topic_name, message=None):
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body(message.body)
@@ -441,7 +440,7 @@ def peek_lock_subscription_message(self, topic_name, subscription_name, timeout=
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -470,7 +469,7 @@ def unlock_subscription_message(self, topic_name, subscription_name, sequence_nu
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -490,7 +489,7 @@ def read_delete_subscription_message(self, topic_name, subscription_name, timeou
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -518,7 +517,7 @@ def delete_subscription_message(self, topic_name, subscription_name, sequence_nu
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(topic_name) + '/subscriptions/' + str(subscription_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -537,7 +536,7 @@ def send_queue_message(self, queue_name, message=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body(message.body)
@@ -562,7 +561,7 @@ def peek_lock_queue_message(self, queue_name, timeout='60'):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -589,7 +588,7 @@ def unlock_queue_message(self, queue_name, sequence_number, lock_token):
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -607,7 +606,7 @@ def read_delete_queue_message(self, queue_name, timeout='60'):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
@@ -633,7 +632,7 @@ def delete_queue_message(self, queue_name, sequence_number, lock_token):
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = self.service_namespace + SERVICE_BUS_HOST_BASE
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(sequence_number) + '/' + str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_service_bus_header(request, self.account_key, self.issuer)
@@ -652,11 +651,13 @@ def receive_subscription_message(self, topic_name, subscription_name, peek_lock=
else:
return self.read_delete_subscription_message(topic_name, subscription_name, timeout)
- def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01'):
+ def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE):
+ #x_ms_version is not used, but the parameter is kept for backwards compatibility
self.requestid = None
self.service_namespace = service_namespace
self.account_key = account_key
- self.issuer = issuer
+ self.issuer = issuer
+ self.host_base = host_base
#get service namespace, account key and issuer. If they are set when constructing, then use them.
#else find them from environment variables.
@@ -673,8 +674,7 @@ def __init__(self, service_namespace=None, account_key=None, issuer=None, x_ms_v
if not self.service_namespace or not self.account_key or not self.issuer:
raise WindowsAzureError('You need to provide servicebus namespace, access key and Issuer')
- self.x_ms_version = x_ms_version
- self._httpclient = _HTTPClient(service_instance=self, service_namespace=service_namespace, account_key=account_key, issuer=issuer, x_ms_version=self.x_ms_version)
+ self._httpclient = _HTTPClient(service_instance=self, service_namespace=service_namespace, account_key=account_key, issuer=issuer)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
@@ -685,7 +685,7 @@ def with_filter(self, filter):
request, pass it off to the next lambda, and then perform any post-processing
on the response.'''
res = ServiceBusService(self.service_namespace, self.account_key,
- self.issuer, self.x_ms_version)
+ self.issuer)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
@@ -693,13 +693,18 @@ def new_filter(request):
res._filter = new_filter
return res
+ def set_proxy(self, host, port):
+ '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ self._httpclient.set_proxy(host, port)
+
+ def _get_host(self):
+ return self.service_namespace + self.host_base
+
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as e:
return _service_bus_error_handler(e)
- if not resp:
- return None
return resp
diff --git a/src/azure/servicemanagement/__init__.py b/src/azure/servicemanagement/__init__.py
new file mode 100644
index 000000000000..5fe33fb49858
--- /dev/null
+++ b/src/azure/servicemanagement/__init__.py
@@ -0,0 +1,1067 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+import base64
+from azure.http import HTTPError
+from azure import (WindowsAzureError, WindowsAzureData, _general_error_handler,
+ _create_entry, _get_entry_properties, xml_escape,
+ _get_child_nodes, WindowsAzureMissingResourceError,
+ WindowsAzureConflictError, _get_serialization_name,
+ _list_of, _scalar_list_of, _dict_of, _Base64String,
+ _get_children_from_path, _get_first_child_node_value)
+import azure
+
+#-----------------------------------------------------------------------------
+# Constants for Azure app environment settings.
+AZURE_MANAGEMENT_CERTFILE = 'AZURE_MANAGEMENT_CERTFILE'
+AZURE_MANAGEMENT_SUBSCRIPTIONID = 'AZURE_MANAGEMENT_SUBSCRIPTIONID'
+
+#x-ms-version for service management.
+X_MS_VERSION = '2012-03-01'
+
+#-----------------------------------------------------------------------------
+# Data classes
+
+class StorageServices(WindowsAzureData):
+ def __init__(self):
+ self.storage_services = _list_of(StorageService)
+
+ def __iter__(self):
+ return iter(self.storage_services)
+
+ def __len__(self):
+ return len(self.storage_services)
+
+ def __getitem__(self, index):
+ return self.storage_services[index]
+
+class StorageService(WindowsAzureData):
+ def __init__(self):
+ self.url = ''
+ self.service_name = ''
+ self.storage_service_properties = StorageAccountProperties()
+ self.storage_service_keys = StorageServiceKeys()
+ self.extended_properties = _dict_of('ExtendedProperty', 'Name', 'Value')
+ self.capabilities = _scalar_list_of(str, 'Capability')
+
+class StorageAccountProperties(WindowsAzureData):
+ def __init__(self):
+ self.description = ''
+ self.affinity_group = ''
+ self.location = ''
+ self.label = _Base64String()
+ self.status = ''
+ self.endpoints = _scalar_list_of(str, 'Endpoint')
+ self.geo_replication_enabled = False
+ self.geo_primary_region = ''
+ self.status_of_primary = ''
+ self.geo_secondary_region = ''
+ self.status_of_secondary = ''
+ self.last_geo_failover_time = ''
+
+class StorageServiceKeys(WindowsAzureData):
+ def __init__(self):
+ self.primary = ''
+ self.secondary = ''
+
+class Locations(WindowsAzureData):
+ def __init__(self):
+ self.locations = _list_of(Location)
+
+ def __iter__(self):
+ return iter(self.locations)
+
+ def __len__(self):
+ return len(self.locations)
+
+ def __getitem__(self, index):
+ return self.locations[index]
+
+class Location(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.display_name = ''
+ self.available_services = _scalar_list_of(str, 'AvailableService')
+
+class AffinityGroup(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.label = _Base64String()
+ self.description = ''
+ self.location = ''
+ self.hosted_services = HostedServices()
+ self.storage_services = StorageServices()
+ self.capabilities = _scalar_list_of(str, 'Capability')
+
+class AffinityGroups(WindowsAzureData):
+ def __init__(self):
+ self.affinity_groups = _list_of(AffinityGroup)
+
+ def __iter__(self):
+ return iter(self.affinity_groups)
+
+ def __len__(self):
+ return len(self.affinity_groups)
+
+ def __getitem__(self, index):
+ return self.affinity_groups[index]
+
+class HostedServices(WindowsAzureData):
+ def __init__(self):
+ self.hosted_services = _list_of(HostedService)
+
+ def __iter__(self):
+ return iter(self.hosted_services)
+
+ def __len__(self):
+ return len(self.hosted_services)
+
+ def __getitem__(self, index):
+ return self.hosted_services[index]
+
+class HostedService(WindowsAzureData):
+ def __init__(self):
+ self.url = ''
+ self.service_name = ''
+ self.hosted_service_properties = HostedServiceProperties()
+ self.deployments = Deployments()
+
+class HostedServiceProperties(WindowsAzureData):
+ def __init__(self):
+ self.description = ''
+ self.location = ''
+ self.affinity_group = ''
+ self.label = _Base64String()
+ self.status = ''
+ self.date_created = ''
+ self.date_last_modified = ''
+ self.extended_properties = _dict_of('ExtendedProperty', 'Name', 'Value')
+
+class Deployments(WindowsAzureData):
+ def __init__(self):
+ self.deployments = _list_of(Deployment)
+
+ def __iter__(self):
+ return iter(self.deployments)
+
+ def __len__(self):
+ return len(self.deployments)
+
+ def __getitem__(self, index):
+ return self.deployments[index]
+
+class Deployment(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.deployment_slot = ''
+ self.private_id = ''
+ self.status = ''
+ self.label = _Base64String()
+ self.url = ''
+ self.configuration = _Base64String()
+ self.role_instance_list = RoleInstanceList()
+ self.upgrade_status = UpgradeStatus()
+ self.upgrade_domain_count = ''
+ self.role_list = RoleList()
+ self.sdk_version = ''
+ self.input_endpoint_list = InputEndpoints()
+ self.locked = False
+ self.rollback_allowed = False
+ self.persistent_vm_downtime_info = PersistentVMDowntimeInfo()
+ self.created_time = ''
+ self.last_modified_time = ''
+ self.extended_properties = _dict_of('ExtendedProperty', 'Name', 'Value')
+
+class RoleInstanceList(WindowsAzureData):
+ def __init__(self):
+ self.role_instances = _list_of(RoleInstance)
+
+ def __iter__(self):
+ return iter(self.role_instances)
+
+ def __len__(self):
+ return len(self.role_instances)
+
+ def __getitem__(self, index):
+ return self.role_instances[index]
+
+class RoleInstance(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+ self.instance_name = ''
+ self.instance_status = ''
+ self.instance_upgrade_domain = 0
+ self.instance_fault_domain = 0
+ self.instance_size = ''
+ self.instance_state_details = ''
+ self.instance_error_code = ''
+ self.ip_address = ''
+ self.power_state = ''
+ self.fqdn = ''
+
+class UpgradeStatus(WindowsAzureData):
+ def __init__(self):
+ self.upgrade_type = ''
+ self.current_upgrade_domain_state = ''
+ self.current_upgrade_domain = ''
+
+class InputEndpoints(WindowsAzureData):
+ def __init__(self):
+ self.input_endpoints = _list_of(InputEndpoint)
+
+ def __iter__(self):
+ return iter(self.input_endpoints)
+
+ def __len__(self):
+ return len(self.input_endpoints)
+
+ def __getitem__(self, index):
+ return self.input_endpoints[index]
+
+class InputEndpoint(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+ self.vip = ''
+ self.port = ''
+
+class RoleList(WindowsAzureData):
+ def __init__(self):
+ self.roles = _list_of(Role)
+
+ def __iter__(self):
+ return iter(self.roles)
+
+ def __len__(self):
+ return len(self.roles)
+
+ def __getitem__(self, index):
+ return self.roles[index]
+
+class Role(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+ self.os_version = ''
+
+class PersistentVMDowntimeInfo(WindowsAzureData):
+ def __init__(self):
+ self.start_time = ''
+ self.end_time = ''
+ self.status = ''
+
+class Certificates(WindowsAzureData):
+ def __init__(self):
+ self.certificates = _list_of(Certificate)
+
+ def __iter__(self):
+ return iter(self.certificates)
+
+ def __len__(self):
+ return len(self.certificates)
+
+ def __getitem__(self, index):
+ return self.certificates[index]
+
+class Certificate(WindowsAzureData):
+ def __init__(self):
+ self.certificate_url = ''
+ self.thumbprint = ''
+ self.thumbprint_algorithm = ''
+ self.data = ''
+
+class OperationError(WindowsAzureData):
+ def __init__(self):
+ self.code = ''
+ self.message = ''
+
+class Operation(WindowsAzureData):
+ def __init__(self):
+ self.id = ''
+ self.status = ''
+ self.http_status_code = ''
+ self.error = OperationError()
+
+class OperatingSystem(WindowsAzureData):
+ def __init__(self):
+ self.version = ''
+ self.label = _Base64String()
+ self.is_default = True
+ self.is_active = True
+ self.family = 0
+ self.family_label = _Base64String()
+
+class OperatingSystems(WindowsAzureData):
+ def __init__(self):
+ self.operating_systems = _list_of(OperatingSystem)
+
+ def __iter__(self):
+ return iter(self.operating_systems)
+
+ def __len__(self):
+ return len(self.operating_systems)
+
+ def __getitem__(self, index):
+ return self.operating_systems[index]
+
+class OperatingSystemFamily(WindowsAzureData):
+ def __init__(self):
+ self.name = ''
+ self.label = _Base64String()
+ self.operating_systems = OperatingSystems()
+
+class OperatingSystemFamilies(WindowsAzureData):
+ def __init__(self):
+ self.operating_system_families = _list_of(OperatingSystemFamily)
+
+ def __iter__(self):
+ return iter(self.operating_system_families)
+
+ def __len__(self):
+ return len(self.operating_system_families)
+
+ def __getitem__(self, index):
+ return self.operating_system_families[index]
+
+class Subscription(WindowsAzureData):
+ def __init__(self):
+ self.subscription_id = ''
+ self.subscription_name = ''
+ self.subscription_status = ''
+ self.account_admin_live_email_id = ''
+ self.service_admin_live_email_id = ''
+ self.max_core_count = 0
+ self.max_storage_accounts = 0
+ self.max_hosted_services = 0
+ self.current_core_count = 0
+ self.current_hosted_services = 0
+ self.current_storage_accounts = 0
+ self.max_virtual_network_sites = 0
+ self.max_local_network_sites = 0
+ self.max_dns_servers = 0
+
+class AvailabilityResponse(WindowsAzureData):
+ def __init__(self):
+ self.result = False
+
+class SubscriptionCertificates(WindowsAzureData):
+ def __init__(self):
+ self.subscription_certificates = _list_of(SubscriptionCertificate)
+
+ def __iter__(self):
+ return iter(self.subscription_certificates)
+
+ def __len__(self):
+ return len(self.subscription_certificates)
+
+ def __getitem__(self, index):
+ return self.subscription_certificates[index]
+
+class SubscriptionCertificate(WindowsAzureData):
+ def __init__(self):
+ self.subscription_certificate_public_key = ''
+ self.subscription_certificate_thumbprint = ''
+ self.subscription_certificate_data = ''
+ self.created = ''
+
+class Images(WindowsAzureData):
+ def __init__(self):
+ self.images = _list_of(OSImage)
+
+ def __iter__(self):
+ return iter(self.images)
+
+ def __len__(self):
+ return len(self.images)
+
+ def __getitem__(self, index):
+ return self.images[index]
+
+class OSImage(WindowsAzureData):
+ def __init__(self):
+ self.affinity_group = ''
+ self.category = ''
+ self.location = ''
+ self.logical_size_in_gb = 0
+ self.label = ''
+ self.media_link = ''
+ self.name = ''
+ self.os = ''
+ self.eula = ''
+ self.description = ''
+
+class Disks(WindowsAzureData):
+ def __init__(self):
+ self.disks = _list_of(Disk)
+
+ def __iter__(self):
+ return iter(self.disks)
+
+ def __len__(self):
+ return len(self.disks)
+
+ def __getitem__(self, index):
+ return self.disks[index]
+
+class Disk(WindowsAzureData):
+ def __init__(self):
+ self.affinity_group = ''
+ self.attached_to = AttachedTo()
+ self.has_operating_system = ''
+ self.is_corrupted = ''
+ self.location = ''
+ self.logical_disk_size_in_gb = 0
+ self.label = ''
+ self.media_link= ''
+ self.name = ''
+ self.os = ''
+ self.source_image_name = ''
+
+class AttachedTo(WindowsAzureData):
+ def __init__(self):
+ self.hosted_service_name = ''
+ self.deployment_name = ''
+ self.role_name = ''
+
+class PersistentVMRole(WindowsAzureData):
+ def __init__(self):
+ self.role_name = ''
+ self.role_type= ''
+ self.os_version = '' # undocumented
+ self.configuration_sets = ConfigurationSets()
+ self.availability_set_name = ''
+ self.data_virtual_hard_disks = DataVirtualHardDisks()
+ self.os_virtual_hard_disk = OSVirtualHardDisk()
+ self.role_size = ''
+
+class ConfigurationSets(WindowsAzureData):
+ def __init__(self):
+ self.configuration_sets = _list_of(ConfigurationSet)
+
+ def __iter__(self):
+ return iter(self.configuration_sets)
+
+ def __len__(self):
+ return len(self.configuration_sets)
+
+ def __getitem__(self, index):
+ return self.configuration_sets[index]
+
+class ConfigurationSet(WindowsAzureData):
+ def __init__(self):
+ self.configuration_set_type = ''
+ self.role_type= ''
+ self.input_endpoints = ConfigurationSetInputEndpoints()
+ self.subnet_names = _scalar_list_of(str, 'SubnetName')
+
+class ConfigurationSetInputEndpoints(WindowsAzureData):
+ def __init__(self):
+ self.input_endpoints = _list_of(ConfigurationSetInputEndpoint, 'InputEndpoint')
+
+ def __iter__(self):
+ return iter(self.input_endpoints)
+
+ def __len__(self):
+ return len(self.input_endpoints)
+
+ def __getitem__(self, index):
+ return self.input_endpoints[index]
+
+class ConfigurationSetInputEndpoint(WindowsAzureData):
+ '''
+ Initializes a network configuration input endpoint.
+
+ name: Specifies the name for the external endpoint.
+ protocol: Specifies the protocol to use to inspect the virtual machine availability status. Possible values are: HTTP, TCP.
+ port: Specifies the external port to use for the endpoint.
+ local_port: Specifies the internal port on which the virtual machine is listening to serve the endpoint.
+ load_balanced_endpoint_set_name: Specifies a name for a set of load-balanced endpoints. Specifying this element for a given endpoint adds it to the set. If you are setting an endpoint to use to connect to the virtual machine via the Remote Desktop, do not set this property.
+ enable_direct_server_return: Specifies whether direct server return load balancing is enabled.
+ '''
+ def __init__(self, name='', protocol='', port='', local_port='', load_balanced_endpoint_set_name='', enable_direct_server_return=False):
+ self.enable_direct_server_return = enable_direct_server_return
+ self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name
+ self.local_port = local_port
+ self.name = name
+ self.port = port
+ self.load_balancer_probe = LoadBalancerProbe()
+ self.protocol = protocol
+
+class WindowsConfigurationSet(WindowsAzureData):
+ def __init__(self, computer_name=None, admin_password=None, reset_password_on_first_logon=None, enable_automatic_updates=None, time_zone=None):
+ self.configuration_set_type = 'WindowsProvisioningConfiguration'
+ self.computer_name = computer_name
+ self.admin_password = admin_password
+ self.reset_password_on_first_logon = reset_password_on_first_logon
+ self.enable_automatic_updates = enable_automatic_updates
+ self.time_zone = time_zone
+ self.domain_join = DomainJoin()
+ self.stored_certificate_settings = StoredCertificateSettings()
+
+class DomainJoin(WindowsAzureData):
+ def __init__(self):
+ self.credentials = Credentials()
+ self.join_domain = ''
+ self.machine_object_ou = ''
+
+class Credentials(WindowsAzureData):
+ def __init__(self):
+ self.domain = ''
+ self.username = ''
+ self.password = ''
+
+class StoredCertificateSettings(WindowsAzureData):
+ def __init__(self):
+ self.stored_certificate_settings = _list_of(CertificateSetting)
+
+ def __iter__(self):
+ return iter(self.stored_certificate_settings)
+
+ def __len__(self):
+ return len(self.stored_certificate_settings)
+
+ def __getitem__(self, index):
+ return self.stored_certificate_settings[index]
+
+class CertificateSetting(WindowsAzureData):
+ '''
+ Initializes a certificate setting.
+
+ thumbprint: Specifies the thumbprint of the certificate to be provisioned. The thumbprint must specify an existing service certificate.
+ store_name: Specifies the name of the certificate store from which retrieve certificate.
+ store_location: Specifies the target certificate store location on the virtual machine. The only supported value is LocalMachine.
+ '''
+ def __init__(self, thumbprint='', store_name='', store_location=''):
+ self.thumbprint = thumbprint
+ self.store_name = store_name
+ self.store_location = store_location
+
+class LinuxConfigurationSet(WindowsAzureData):
+ def __init__(self, host_name=None, user_name=None, user_password=None, disable_ssh_password_authentication=None):
+ self.configuration_set_type = 'LinuxProvisioningConfiguration'
+ self.host_name = host_name
+ self.user_name = user_name
+ self.user_password = user_password
+ self.disable_ssh_password_authentication = disable_ssh_password_authentication
+ self.ssh = SSH()
+
+class SSH(WindowsAzureData):
+ def __init__(self):
+ self.public_keys = PublicKeys()
+ self.key_pairs = KeyPairs()
+
+class PublicKeys(WindowsAzureData):
+ def __init__(self):
+ self.public_keys = _list_of(PublicKey)
+
+ def __iter__(self):
+ return iter(self.public_keys)
+
+ def __len__(self):
+ return len(self.public_keys)
+
+ def __getitem__(self, index):
+ return self.public_keys[index]
+
+class PublicKey(WindowsAzureData):
+ def __init__(self):
+ self.finger_print = ''
+ self.path = ''
+
+class KeyPairs(WindowsAzureData):
+ def __init__(self):
+ self.key_pairs = _list_of(KeyPair)
+
+ def __iter__(self):
+ return iter(self.key_pairs)
+
+ def __len__(self):
+ return len(self.key_pairs)
+
+ def __getitem__(self, index):
+ return self.key_pairs[index]
+
+class KeyPair(WindowsAzureData):
+ def __init__(self):
+ self.finger_print = ''
+ self.path = ''
+
+class LoadBalancerProbe(WindowsAzureData):
+ def __init__(self):
+ self.path = ''
+ self.port = ''
+ self.protocol = ''
+
+class DataVirtualHardDisks(WindowsAzureData):
+ def __init__(self):
+ self.data_virtual_hard_disks = _list_of(DataVirtualHardDisk)
+
+ def __iter__(self):
+ return iter(self.data_virtual_hard_disks)
+
+ def __len__(self):
+ return len(self.data_virtual_hard_disks)
+
+ def __getitem__(self, index):
+ return self.data_virtual_hard_disks[index]
+
+class DataVirtualHardDisk(WindowsAzureData):
+ def __init__(self):
+ self.host_caching = ''
+ self.disk_label = ''
+ self.disk_name = ''
+ self.lun = 0
+ self.logical_disk_size_in_gb = 0
+ self.media_link = ''
+
+class OSVirtualHardDisk(WindowsAzureData):
+ def __init__(self, source_image_name=None, media_link=None, host_caching=None, disk_label=None, disk_name=None):
+ self.source_image_name = source_image_name
+ self.media_link = media_link
+ self.host_caching = host_caching
+ self.disk_label = disk_label
+ self.disk_name = disk_name
+ self.os = '' # undocumented, not used when adding a role
+
class AsynchronousOperationResult(WindowsAzureData):
    '''Result of an asynchronous management operation.

    request_id: value of the x-ms-request-id response header; can be used
                to poll the operation status.
    '''

    def __init__(self, request_id=None):
        self.request_id = request_id
+
def _update_management_header(request):
    ''' Add additional headers for management.

    request: HTTPRequest whose headers list is mutated in place.
    Returns the updated request.headers list.
    '''

    # Methods that carry a body must declare its length.
    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
        request.headers.append(('Content-Length', str(len(request.body))))

    # append additional headers base on the service
    request.headers.append(('x-ms-version', X_MS_VERSION))

    # if it is not GET or HEAD request, must set content-type.
    # (idiom fix: 'x not in y' instead of 'not x in y'; any() instead of
    # a for/else loop -- behavior unchanged: only add the default
    # Content-Type when none is already present, case-insensitively)
    if request.method not in ['GET', 'HEAD']:
        has_content_type = any(
            name.lower() == 'content-type' for name, value in request.headers)
        if not has_content_type:
            request.headers.append(
                ('Content-Type',
                 'application/atom+xml;type=entry;charset=utf-8'))

    return request.headers
+
def _parse_response_for_async_op(response):
    ''' Extracts request id from response header.

    Returns None for a None response; otherwise returns an
    AsynchronousOperationResult whose request_id is taken from the
    x-ms-request-id header (or left as None when absent).
    '''
    if response is None:
        return None

    result = AsynchronousOperationResult()
    if response.headers:
        for header_name, header_value in response.headers:
            if header_name.lower() == 'x-ms-request-id':
                result.request_id = header_value

    return result
+
def _management_error_handler(http_error):
    ''' Simple error handler for management service. Will add more specific cases '''
    # Currently a plain delegate to the shared handler (presumably it maps
    # the HTTPError onto the package's WindowsAzure* exceptions -- defined
    # elsewhere in this file).
    return _general_error_handler(http_error)
+
def _lower(text):
    '''Lower-cases the given text.

    Used as a data_to_xml converter so that stringified Python booleans
    ('True'/'False') are emitted as 'true'/'false' in request bodies.
    '''
    return text.lower()
+
+class _XmlSerializer(object):
+ @staticmethod
+ def create_storage_service_input_to_xml(service_name, description, label, affinity_group, location, geo_replication_enabled, extended_properties):
+ return _XmlSerializer.doc_from_data('CreateStorageServiceInput',
+ [('ServiceName', service_name),
+ ('Description', description),
+ ('Label', label, base64.b64encode),
+ ('AffinityGroup', affinity_group),
+ ('Location', location),
+ ('GeoReplicationEnabled', geo_replication_enabled, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def update_storage_service_input_to_xml(description, label, geo_replication_enabled, extended_properties):
+ return _XmlSerializer.doc_from_data('UpdateStorageServiceInput',
+ [('Description', description),
+ ('Label', label, base64.b64encode),
+ ('GeoReplicationEnabled', geo_replication_enabled, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def regenerate_keys_to_xml(key_type):
+ return _XmlSerializer.doc_from_data('RegenerateKeys',
+ [('KeyType', key_type)])
+
+ @staticmethod
+ def update_hosted_service_to_xml(label, description, extended_properties):
+ return _XmlSerializer.doc_from_data('UpdateHostedService',
+ [('Label', label, base64.b64encode),
+ ('Description', description)],
+ extended_properties)
+
+ @staticmethod
+ def create_hosted_service_to_xml(service_name, label, description, location, affinity_group, extended_properties):
+ return _XmlSerializer.doc_from_data('CreateHostedService',
+ [('ServiceName', service_name),
+ ('Label', label, base64.b64encode),
+ ('Description', description),
+ ('Location', location),
+ ('AffinityGroup', affinity_group)],
+ extended_properties)
+
+ @staticmethod
+ def create_deployment_to_xml(name, package_url, label, configuration, start_deployment, treat_warnings_as_error, extended_properties):
+ return _XmlSerializer.doc_from_data('CreateDeployment',
+ [('Name', name),
+ ('PackageUrl', package_url),
+ ('Label', label, base64.b64encode),
+ ('Configuration', configuration),
+ ('StartDeployment', start_deployment, _lower),
+ ('TreatWarningsAsError', treat_warnings_as_error, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def swap_deployment_to_xml(production, source_deployment):
+ return _XmlSerializer.doc_from_data('Swap',
+ [('Production', production),
+ ('SourceDeployment', source_deployment)])
+
+ @staticmethod
+ def update_deployment_status_to_xml(status):
+ return _XmlSerializer.doc_from_data('UpdateDeploymentStatus',
+ [('Status', status)])
+
+ @staticmethod
+ def change_deployment_to_xml(configuration, treat_warnings_as_error, mode, extended_properties):
+ return _XmlSerializer.doc_from_data('ChangeConfiguration',
+ [('Configuration', configuration),
+ ('TreatWarningsAsError', treat_warnings_as_error, _lower),
+ ('Mode', mode)],
+ extended_properties)
+
+ @staticmethod
+ def upgrade_deployment_to_xml(mode, package_url, configuration, label, role_to_upgrade, force, extended_properties):
+ return _XmlSerializer.doc_from_data('UpgradeDeployment',
+ [('Mode', mode),
+ ('PackageUrl', package_url),
+ ('Configuration', configuration),
+ ('Label', label, base64.b64encode),
+ ('RoleToUpgrade', role_to_upgrade),
+ ('Force', force, _lower)],
+ extended_properties)
+
+ @staticmethod
+ def rollback_upgrade_to_xml(mode, force):
+ return _XmlSerializer.doc_from_data('RollbackUpdateOrUpgrade',
+ [('Mode', mode),
+ ('Force', force, _lower)])
+
+ @staticmethod
+ def walk_upgrade_domain_to_xml(upgrade_domain):
+ return _XmlSerializer.doc_from_data('WalkUpgradeDomain',
+ [('UpgradeDomain', upgrade_domain)])
+
+ @staticmethod
+ def certificate_file_to_xml(data, certificate_format, password):
+ return _XmlSerializer.doc_from_data('CertificateFile',
+ [('Data', data),
+ ('CertificateFormat', certificate_format),
+ ('Password', password)])
+
+ @staticmethod
+ def create_affinity_group_to_xml(name, label, description, location):
+ return _XmlSerializer.doc_from_data('CreateAffinityGroup',
+ [('Name', name),
+ ('Label', label, base64.b64encode),
+ ('Description', description),
+ ('Location', location)])
+
+ @staticmethod
+ def update_affinity_group_to_xml(label, description):
+ return _XmlSerializer.doc_from_data('UpdateAffinityGroup',
+ [('Label', label, base64.b64encode),
+ ('Description', description)])
+
+ @staticmethod
+ def subscription_certificate_to_xml(public_key, thumbprint, data):
+ return _XmlSerializer.doc_from_data('SubscriptionCertificate',
+ [('SubscriptionCertificatePublicKey', public_key),
+ ('SubscriptionCertificateThumbprint', thumbprint),
+ ('SubscriptionCertificateData', data)])
+
+ @staticmethod
+ def os_image_to_xml(label, media_link, name, os):
+ return _XmlSerializer.doc_from_data('OSImage',
+ [('Label', label),
+ ('MediaLink', media_link),
+ ('Name', name),
+ ('OS', os)])
+
+ @staticmethod
+ def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, logical_disk_size_in_gb, media_link, source_media_link):
+ return _XmlSerializer.doc_from_data('DataVirtualHardDisk',
+ [('HostCaching', host_caching),
+ ('DiskLabel', disk_label),
+ ('DiskName', disk_name),
+ ('Lun', lun),
+ ('LogicalDiskSizeInGB', logical_disk_size_in_gb),
+ ('MediaLink', media_link),
+ ('SourceMediaLink', source_media_link)])
+
+ @staticmethod
+ def disk_to_xml(has_operating_system, label, media_link, name, os):
+ return _XmlSerializer.doc_from_data('Disk',
+ [('HasOperatingSystem', has_operating_system, _lower),
+ ('Label', label),
+ ('MediaLink', media_link),
+ ('Name', name),
+ ('OS', os)])
+
+ @staticmethod
+ def restart_role_operation_to_xml():
+ return _XmlSerializer.doc_from_xml('RestartRoleOperation',
+ 'RestartRoleOperation')
+
+ @staticmethod
+ def shutdown_role_operation_to_xml():
+ return _XmlSerializer.doc_from_xml('ShutdownRoleOperation',
+ 'ShutdownRoleOperation')
+
+ @staticmethod
+ def start_role_operation_to_xml():
+ return _XmlSerializer.doc_from_xml('StartRoleOperation',
+ 'StartRoleOperation')
+
+ @staticmethod
+ def windows_configuration_to_xml(configuration):
+ xml = _XmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type),
+ ('ComputerName', configuration.computer_name),
+ ('AdminPassword', configuration.admin_password, base64.b64encode),
+ ('ResetPasswordOnFirstLogon', configuration.reset_password_on_first_logon, _lower),
+ ('EnableAutomaticUpdates', configuration.enable_automatic_updates, _lower),
+ ('TimeZone', configuration.time_zone)])
+
+ if configuration.domain_join is not None:
+ xml += ''
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('Domain', configuration.domain_join.credentials.domain),
+ ('Username', configuration.domain_join.credentials.username),
+ ('Password', configuration.domain_join.credentials.password)])
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('JoinDomain', configuration.domain_join.join_domain),
+ ('MachineObjectOU', configuration.domain_join.machine_object_ou)])
+ xml += ''
+ if configuration.stored_certificate_settings is not None:
+ xml += ''
+ for cert in configuration.stored_certificate_settings:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('StoreLocation', cert.store_location),
+ ('StoreName', cert.store_name),
+ ('Thumbprint', cert.thumbprint)])
+ xml += ''
+ xml += ''
+ return xml
+
+ @staticmethod
+ def linux_configuration_to_xml(configuration):
+ xml = _XmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type),
+ ('HostName', configuration.host_name),
+ ('UserName', configuration.user_name),
+ ('UserPassword', configuration.user_password),
+ ('DisableSshPasswordAuthentication', configuration.disable_ssh_password_authentication, _lower)])
+
+ if configuration.ssh is not None:
+ xml += ''
+ xml += ''
+ for key in configuration.ssh.public_keys:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('FingerPrint', key.finger_print),
+ ('Path', key.path)])
+ xml += ''
+ xml += ''
+ xml += ''
+ for key in configuration.ssh.key_pairs:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('FingerPrint', key.finger_print),
+ ('Path', key.path)])
+ xml += ''
+ xml += ''
+ xml += ''
+ return xml
+
+ @staticmethod
+ def network_configuration_to_xml(configuration):
+ xml = _XmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type)])
+ xml += ''
+ for endpoint in configuration.input_endpoints:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('EnableDirectServerReturn', endpoint.enable_direct_server_return, _lower),
+ ('LoadBalancedEndpointSetName', endpoint.load_balanced_endpoint_set_name),
+ ('LocalPort', endpoint.local_port),
+ ('Name', endpoint.name),
+ ('Port', endpoint.port)])
+
+ if endpoint.load_balancer_probe.path or endpoint.load_balancer_probe.port or endpoint.load_balancer_probe.protocol:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('Path', endpoint.load_balancer_probe.path),
+ ('Port', endpoint.load_balancer_probe.port),
+ ('Protocol', endpoint.load_balancer_probe.protocol)])
+ xml += ''
+
+ xml += _XmlSerializer.data_to_xml([('Protocol', endpoint.protocol)])
+ xml += ''
+ xml += ''
+ xml += ''
+ for name in configuration.subnet_names:
+ xml += _XmlSerializer.data_to_xml([('SubnetName', name)])
+ xml += ''
+ return xml
+
+ @staticmethod
+ def role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, system_configuration_set):
+ xml = _XmlSerializer.data_to_xml([('RoleName', role_name),
+ ('RoleType', role_type)])
+
+ xml += ''
+
+ if system_configuration_set is not None:
+ xml += ''
+ if isinstance(system_configuration_set, WindowsConfigurationSet):
+ xml += _XmlSerializer.windows_configuration_to_xml(system_configuration_set)
+ elif isinstance(system_configuration_set, LinuxConfigurationSet):
+ xml += _XmlSerializer.linux_configuration_to_xml(system_configuration_set)
+ xml += ''
+
+ if network_configuration_set is not None:
+ xml += ''
+ xml += _XmlSerializer.network_configuration_to_xml(network_configuration_set)
+ xml += ''
+
+ xml += ''
+
+ if availability_set_name is not None:
+ xml += _XmlSerializer.data_to_xml([('AvailabilitySetName', availability_set_name)])
+
+ if data_virtual_hard_disks is not None:
+ xml += ''
+ for hd in data_virtual_hard_disks:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('HostCaching', hd.host_caching),
+ ('DiskLabel', hd.disk_label),
+ ('DiskName', hd.disk_name),
+ ('Lun', hd.lun),
+ ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb),
+ ('MediaLink', hd.media_link)])
+ xml += ''
+ xml += ''
+
+ if os_virtual_hard_disk is not None:
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('HostCaching', os_virtual_hard_disk.host_caching),
+ ('DiskLabel', os_virtual_hard_disk.disk_label),
+ ('DiskName', os_virtual_hard_disk.disk_name),
+ ('MediaLink', os_virtual_hard_disk.media_link),
+ ('SourceImageName', os_virtual_hard_disk.source_image_name)])
+ xml += ''
+
+ if role_size is not None:
+ xml += _XmlSerializer.data_to_xml([('RoleSize', role_size)])
+
+ return xml
+
+ @staticmethod
+ def add_role_to_xml(role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size):
+ xml = _XmlSerializer.role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, system_configuration_set)
+ return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)
+
+ @staticmethod
+ def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size):
+ xml = _XmlSerializer.role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, None)
+ return _XmlSerializer.doc_from_xml('PersistentVMRole', xml)
+
+ @staticmethod
+ def capture_role_to_xml(post_capture_action, target_image_name, target_image_label, provisioning_configuration):
+ xml = _XmlSerializer.data_to_xml([('OperationType', 'CaptureRoleOperation'),
+ ('PostCaptureAction', post_capture_action)])
+ if provisioning_configuration is not None:
+ xml += ''
+ if isinstance(provisioning_configuration, WindowsConfigurationSet):
+ xml += _XmlSerializer.windows_configuration_to_xml(provisioning_configuration)
+ elif isinstance(provisioning_configuration, LinuxConfigurationSet):
+ xml += _XmlSerializer.linux_configuration_to_xml(provisioning_configuration)
+ xml += ''
+ xml += _XmlSerializer.data_to_xml([('TargetImageLabel', target_image_label),
+ ('TargetImageName', target_image_name)])
+ return _XmlSerializer.doc_from_xml('CaptureRoleOperation', xml)
+
+ @staticmethod
+ def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size):
+ xml = _XmlSerializer.data_to_xml([('Name', deployment_name),
+ ('DeploymentSlot', deployment_slot),
+ ('Label', label, base64.b64encode)])
+ xml += ''
+ xml += ''
+ xml += _XmlSerializer.role_to_xml(availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, role_name, role_size, role_type, system_configuration_set)
+ xml += ''
+ xml += ''
+ return _XmlSerializer.doc_from_xml('Deployment', xml)
+
+ @staticmethod
+ def data_to_xml(data):
+ '''Creates an xml fragment from the specified data.
+ data: Array of tuples, where first: xml element name
+ second: xml element text
+ third: conversion function
+ '''
+ xml = ''
+ for element in data:
+ name = element[0]
+ val = element[1]
+ if len(element) > 2:
+ converter = element[2]
+ else:
+ converter = None
+
+ if val is not None:
+ if converter is not None:
+ text = converter(str(val))
+ else:
+ text = str(val)
+ xml += ''.join(['<', name, '>', text, '', name, '>'])
+ return xml
+
+ @staticmethod
+ def doc_from_xml(document_element_name, inner_xml):
+ '''Wraps the specified xml in an xml root element with default azure namespaces'''
+ xml = ''.join(['<', document_element_name, ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.microsoft.com/windowsazure">'])
+ xml += inner_xml
+ xml += ''.join(['', document_element_name, '>'])
+ return xml
+
+ @staticmethod
+ def doc_from_data(document_element_name, data, extended_properties=None):
+ xml = _XmlSerializer.data_to_xml(data)
+ if extended_properties is not None:
+ xml += _XmlSerializer.extended_properties_dict_to_xml_fragment(extended_properties)
+ return _XmlSerializer.doc_from_xml(document_element_name, xml)
+
+ @staticmethod
+ def extended_properties_dict_to_xml_fragment(extended_properties):
+ xml = ''
+ if extended_properties is not None and len(extended_properties) > 0:
+ xml += ''
+ for key, val in extended_properties.items():
+ xml += ''.join(['', '', str(key), '', '', str(val), '', ''])
+ xml += ''
+ return xml
+
+from azure.servicemanagement.servicemanagementservice import ServiceManagementService
diff --git a/src/azure/servicemanagement/servicemanagementservice.py b/src/azure/servicemanagement/servicemanagementservice.py
new file mode 100644
index 000000000000..c9f287f45a7e
--- /dev/null
+++ b/src/azure/servicemanagement/servicemanagementservice.py
@@ -0,0 +1,1468 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+import base64
+import os
+import urllib2
+
+from azure.http.httpclient import _HTTPClient
+from azure.http import HTTPError
+from azure.servicemanagement import *
+from azure.servicemanagement import (_update_management_header,
+ _management_error_handler,
+ _parse_response_for_async_op,
+ _XmlSerializer)
+from azure.http import HTTPRequest
+from azure import (_validate_not_none,
+ _get_request_body, _update_request_uri_query,
+ WindowsAzureError, _parse_response,
+ MANAGEMENT_HOST)
+
+class ServiceManagementService:
+ def __init__(self, subscription_id=None, cert_file=None, host=MANAGEMENT_HOST):
+ self.requestid = None
+ self.subscription_id = subscription_id
+ self.cert_file = cert_file
+ self.host = host
+
+ if not self.cert_file:
+ if os.environ.has_key(AZURE_MANAGEMENT_CERTFILE):
+ self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]
+
+ if not self.subscription_id:
+ if os.environ.has_key(AZURE_MANAGEMENT_SUBSCRIPTIONID):
+ self.subscription_id = os.environ[AZURE_MANAGEMENT_SUBSCRIPTIONID]
+
+ if not self.cert_file or not self.subscription_id:
+ raise WindowsAzureError('You need to provide subscription id and certificate file')
+
+ self._httpclient = _HTTPClient(service_instance=self, cert_file=self.cert_file)
+ self._filter = self._httpclient.perform_request
+
+ def with_filter(self, filter):
+ '''Returns a new service which will process requests with the
+ specified filter. Filtering operations can include logging, automatic
+ retrying, etc... The filter is a lambda which receives the HTTPRequest
+ and another lambda. The filter can perform any pre-processing on the
+ request, pass it off to the next lambda, and then perform any post-processing
+ on the response.'''
+ res = ServiceManagementService(self.subscription_id, self.cert_file)
+ old_filter = self._filter
+ def new_filter(request):
+ return filter(request, old_filter)
+
+ res._filter = new_filter
+ return res
+
    def set_proxy(self, host, port):
        '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.

        host: Address of the proxy server.
        port: Port the proxy server listens on.
        '''
        # Delegates to the underlying HTTP client, which owns the connection.
        self._httpclient.set_proxy(host, port)
+
+ #--Operations for storage accounts -----------------------------------
+ def list_storage_accounts(self):
+ '''
+ Lists the storage accounts available under the current subscription.
+ '''
+ return self._perform_get(self._get_storage_service_path(),
+ StorageServices)
+
+ def get_storage_account_properties(self, service_name):
+ '''
+ Returns system properties for the specified storage account.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get(self._get_storage_service_path(service_name),
+ StorageService)
+
+ def get_storage_account_keys(self, service_name):
+ '''
+ Returns the primary and secondary access keys for the specified storage account.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get(self._get_storage_service_path(service_name) + '/keys',
+ StorageService)
+
+ def regenerate_storage_account_keys(self, service_name, key_type):
+ '''
+ Regenerates the primary or secondary access key for the specified storage account.
+
+ service_name: Name of the storage service account.
+ key_type: Specifies which key to regenerate. Valid values are: Primary, Secondary
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('key_type', key_type)
+ return self._perform_post(self._get_storage_service_path(service_name) + '/keys?action=regenerate',
+ _XmlSerializer.regenerate_keys_to_xml(key_type),
+ StorageService)
+
+ def create_storage_account(self, service_name, description, label, affinity_group=None, location=None, geo_replication_enabled=True, extended_properties=None):
+ '''
+ Creates a new storage account in Windows Azure.
+
+ service_name: A name for the storage account that is unique within
+ Windows Azure. Storage account names must be between 3
+ and 24 characters in length and use numbers and
+ lower-case letters only.
+ description: A description for the storage account. The description
+ may be up to 1024 characters in length.
+ label: A name for the storage account specified as a base64-encoded
+ string. The name may be up to 100 characters in length. The
+ name can be used identify the storage account for your tracking
+ purposes.
+ affinity_group: The name of an existing affinity group in the
+ specified subscription. You can specify either a
+ location or affinity_group, but not both.
+ location: The location where the storage account is created. You can
+ specify either a location or affinity_group, but not both.
+ geo_replication_enabled: Specifies whether the storage account is
+ created with the geo-replication enabled. If
+ the element is not included in the request
+ body, the default value is true. If set to
+ true, the data in the storage account is
+ replicated across more than one geographic
+ location so as to enable resilience in the
+ face of catastrophic service loss.
+ extended_properties: Dictionary containing name/value pairs of storage
+ account properties. You can have a maximum of 50
+ extended property name/value pairs. The maximum
+ length of the Name element is 64 characters, only
+ alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a
+ letter. The value has a maximum length of 255
+ characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('description', description)
+ _validate_not_none('label', label)
+ if affinity_group is None and location is None:
+ raise WindowsAzureError('location or affinity_group must be specified')
+ if affinity_group is not None and location is not None:
+ raise WindowsAzureError('Only one of location or affinity_group needs to be specified')
+ return self._perform_post(self._get_storage_service_path(),
+ _XmlSerializer.create_storage_service_input_to_xml(service_name, description, label, affinity_group, location, geo_replication_enabled, extended_properties),
+ async=True)
+
+ def update_storage_account(self, service_name, description=None, label=None, geo_replication_enabled=None, extended_properties=None):
+ '''
+ Updates the label, the description, and enables or disables the
+ geo-replication status for a storage account in Windows Azure.
+
+ service_name: Name of the storage service account.
+ description: A description for the storage account. The description
+ may be up to 1024 characters in length.
+ label: A name for the storage account specified as a base64-encoded
+ string. The name may be up to 100 characters in length. The
+ name can be used identify the storage account for your tracking
+ purposes.
+ geo_replication_enabled: Specifies whether the storage account is
+ created with the geo-replication enabled. If
+ the element is not included in the request
+ body, the default value is true. If set to
+ true, the data in the storage account is
+ replicated across more than one geographic
+ location so as to enable resilience in the
+ face of catastrophic service loss.
+ extended_properties: Dictionary containing name/value pairs of storage
+ account properties. You can have a maximum of 50
+ extended property name/value pairs. The maximum
+ length of the Name element is 64 characters, only
+ alphanumeric characters and underscores are valid
+ in the Name, and the name must start with a
+ letter. The value has a maximum length of 255
+ characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_put(self._get_storage_service_path(service_name),
+ _XmlSerializer.update_storage_service_input_to_xml(description, label, geo_replication_enabled, extended_properties))
+
+ def delete_storage_account(self, service_name):
+ '''
+ Deletes the specified storage account from Windows Azure.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_delete(self._get_storage_service_path(service_name))
+
+
+ def check_storage_account_name_availability(self, service_name):
+ '''
+ Checks to see if the specified storage account name is available, or
+ if it has already been taken.
+
+ service_name: Name of the storage service account.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get(self._get_storage_service_path() + '/operations/isavailable/' + str(service_name) + '',
+ AvailabilityResponse)
+
+ #--Operations for hosted services ------------------------------------
+ def list_hosted_services(self):
+ '''
+ Lists the hosted services available under the current subscription.
+ '''
+ return self._perform_get(self._get_hosted_service_path(),
+ HostedServices)
+
+ def get_hosted_service_properties(self, service_name, embed_detail=False):
+ '''
+ Retrieves system properties for the specified hosted service. These
+ properties include the service name and service type; the name of the
+ affinity group to which the service belongs, or its location if it is
+ not part of an affinity group; and optionally, information on the
+ service's deployments.
+
+ service_name: Name of the hosted service.
+ embed_detail: When True, the management service returns properties for
+ all deployments of the service, as well as for the
+ service itself.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('embed_detail', embed_detail)
+ return self._perform_get(self._get_hosted_service_path(service_name) + '?embed-detail=' + str(embed_detail).lower(),
+ HostedService)
+
+ def create_hosted_service(self, service_name, label, description=None, location=None, affinity_group=None, extended_properties=None):
+ '''
+ Creates a new hosted service in Windows Azure.
+
+ service_name: A name for the hosted service that is unique within
+ Windows Azure. This name is the DNS prefix name and can
+ be used to access the hosted service.
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. The name can be used
+ identify the storage account for your tracking purposes.
+ description: A description for the hosted service. The description can
+ be up to 1024 characters in length.
+ location: The location where the hosted service will be created. You
+ can specify either a location or affinity_group, but not
+ both.
+ affinity_group: The name of an existing affinity group associated with
+ this subscription. This name is a GUID and can be
+ retrieved by examining the name element of the response
+ body returned by list_affinity_groups. You can specify
+ either a location or affinity_group, but not both.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('label', label)
+ if affinity_group is None and location is None:
+ raise WindowsAzureError('location or affinity_group must be specified')
+ if affinity_group is not None and location is not None:
+ raise WindowsAzureError('Only one of location or affinity_group needs to be specified')
+ return self._perform_post(self._get_hosted_service_path(),
+ _XmlSerializer.create_hosted_service_to_xml(service_name, label, description, location, affinity_group, extended_properties))
+
+ def update_hosted_service(self, service_name, label=None, description=None, extended_properties=None):
+ '''
+ Updates the label and/or the description for a hosted service in
+ Windows Azure.
+
+ service_name: Name of the hosted service.
+ label: A name for the hosted service that is base64-encoded. The name
+ may be up to 100 characters in length. You must specify a value
+ for either Label or Description, or for both. It is recommended
+ that the label be unique within the subscription. The name can
+ be used identify the hosted service for your tracking purposes.
+ description: A description for the hosted service. The description may
+ be up to 1024 characters in length. You must specify a
+ value for either Label or Description, or for both.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_put(self._get_hosted_service_path(service_name),
+ _XmlSerializer.update_hosted_service_to_xml(label, description, extended_properties))
+
+ def delete_hosted_service(self, service_name):
+ '''
+ Deletes the specified hosted service from Windows Azure.
+
+ service_name: Name of the hosted service.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_delete(self._get_hosted_service_path(service_name))
+
+ def get_deployment_by_slot(self, service_name, deployment_slot):
+ '''
+ Returns configuration information, status, and system properties for
+ a deployment.
+
+ service_name: Name of the hosted service.
+ deployment_slot: The environment to which the hosted service is
+ deployed. Valid values are: staging, production
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_slot', deployment_slot)
+ return self._perform_get(self._get_deployment_path_using_slot(service_name, deployment_slot),
+ Deployment)
+
+ def get_deployment_by_name(self, service_name, deployment_name):
+ '''
+ Returns configuration information, status, and system properties for a
+ deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ return self._perform_get(self._get_deployment_path_using_name(service_name, deployment_name),
+ Deployment)
+
+ def create_deployment(self, service_name, deployment_slot, name, package_url, label, configuration, start_deployment=False, treat_warnings_as_error=False, extended_properties=None):
+ '''
+ Uploads a new service package and creates a new deployment on staging
+ or production.
+
+ service_name: Name of the hosted service.
+ deployment_slot: The environment to which the hosted service is
+ deployed. Valid values are: staging, production
+ name: The name for the deployment. The deployment name must be unique
+ among other deployments for the hosted service.
+ package_url: A URL that refers to the location of the service package
+ in the Blob service. The service package can be located
+ either in a storage account beneath the same subscription
+ or a Shared Access Signature (SAS) URI from any storage
+ account.
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that
+ the label be unique within the subscription. The name can be
+               used to identify the hosted service for your tracking purposes.
+ configuration: The base-64 encoded service configuration file for the
+ deployment.
+ start_deployment: Indicates whether to start the deployment
+ immediately after it is created. If false, the
+ service model is still deployed to the virtual
+ machines but the code is not run immediately.
+ Instead, the service is Suspended until you call
+ Update Deployment Status and set the status to
+ Running, at which time the service will be started.
+ A deployed service still incurs charges, even if it
+ is suspended.
+ treat_warnings_as_error: Indicates whether to treat package validation
+ warnings as errors. If set to true, the
+ Created Deployment operation fails if there
+ are validation warnings on the service package.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_slot', deployment_slot)
+ _validate_not_none('name', name)
+ _validate_not_none('package_url', package_url)
+ _validate_not_none('label', label)
+ _validate_not_none('configuration', configuration)
+ return self._perform_post(self._get_deployment_path_using_slot(service_name, deployment_slot),
+ _XmlSerializer.create_deployment_to_xml(name, package_url, label, configuration, start_deployment, treat_warnings_as_error, extended_properties),
+ async=True)
+
+ def delete_deployment(self, service_name, deployment_name):
+ '''
+ Deletes the specified deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ return self._perform_delete(self._get_deployment_path_using_name(service_name, deployment_name),
+ async=True)
+
+ def swap_deployment(self, service_name, production, source_deployment):
+ '''
+ Initiates a virtual IP swap between the staging and production
+ deployment environments for a service. If the service is currently
+ running in the staging environment, it will be swapped to the
+ production environment. If it is running in the production
+ environment, it will be swapped to staging.
+
+ service_name: Name of the hosted service.
+ production: The name of the production deployment.
+ source_deployment: The name of the source deployment.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('production', production)
+ _validate_not_none('source_deployment', source_deployment)
+ return self._perform_post(self._get_hosted_service_path(service_name),
+ _XmlSerializer.swap_deployment_to_xml(production, source_deployment),
+ async=True)
+
+ def change_deployment_configuration(self, service_name, deployment_name, configuration, treat_warnings_as_error=False, mode='Auto', extended_properties=None):
+ '''
+ Initiates a change to the deployment configuration.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ configuration: The base-64 encoded service configuration file for the
+ deployment.
+ treat_warnings_as_error: Indicates whether to treat package validation
+ warnings as errors. If set to true, the
+ Created Deployment operation fails if there
+ are validation warnings on the service
+ package.
+ mode: If set to Manual, WalkUpgradeDomain must be called to apply the
+ update. If set to Auto, the Windows Azure platform will
+              automatically apply the update to each upgrade domain for the
+ service. Possible values are: Auto, Manual
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('configuration', configuration)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=config',
+ _XmlSerializer.change_deployment_to_xml(configuration, treat_warnings_as_error, mode, extended_properties),
+ async=True)
+
+ def update_deployment_status(self, service_name, deployment_name, status):
+ '''
+ Initiates a change in deployment status.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ status: The change to initiate to the deployment status. Possible
+ values include: Running, Suspended
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('status', status)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=status',
+ _XmlSerializer.update_deployment_status_to_xml(status),
+ async=True)
+
+ def upgrade_deployment(self, service_name, deployment_name, mode, package_url, configuration, label, force, role_to_upgrade=None, extended_properties=None):
+ '''
+ Initiates an upgrade.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ mode: If set to Manual, WalkUpgradeDomain must be called to apply the
+ update. If set to Auto, the Windows Azure platform will
+              automatically apply the update to each upgrade domain for the
+ service. Possible values are: Auto, Manual
+ package_url: A URL that refers to the location of the service package
+ in the Blob service. The service package can be located
+ either in a storage account beneath the same subscription
+ or a Shared Access Signature (SAS) URI from any storage
+ account.
+ configuration: The base-64 encoded service configuration file for the
+ deployment.
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that
+ the label be unique within the subscription. The name can be
+               used to identify the hosted service for your tracking purposes.
+        force: Specifies whether the upgrade should proceed even when it will
+               cause local data to be lost from some role instances. True if
+               the upgrade should proceed; otherwise false if the upgrade
+               should fail.
+ role_to_upgrade: The name of the specific role to upgrade.
+ extended_properties: Dictionary containing name/value pairs of
+ extended hosted service properties. You can have
+ a maximum of 50 extended property name/value
+ pairs. The maximum length of the Name element is
+ 64 characters, only alphanumeric characters and
+ underscores are valid in the Name, and the name
+ must start with a letter. The value has a maximum
+ length of 255 characters.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('mode', mode)
+ _validate_not_none('package_url', package_url)
+ _validate_not_none('configuration', configuration)
+ _validate_not_none('label', label)
+ _validate_not_none('force', force)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=upgrade',
+ _XmlSerializer.upgrade_deployment_to_xml(mode, package_url, configuration, label, role_to_upgrade, force, extended_properties),
+ async=True)
+
+ def walk_upgrade_domain(self, service_name, deployment_name, upgrade_domain):
+ '''
+ Specifies the next upgrade domain to be walked during manual in-place
+ upgrade or configuration change.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ upgrade_domain: An integer value that identifies the upgrade domain
+ to walk. Upgrade domains are identified with a
+ zero-based index: the first upgrade domain has an ID
+ of 0, the second has an ID of 1, and so on.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('upgrade_domain', upgrade_domain)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=walkupgradedomain',
+ _XmlSerializer.walk_upgrade_domain_to_xml(upgrade_domain),
+ async=True)
+
+ def rollback_update_or_upgrade(self, service_name, deployment_name, mode, force):
+ '''
+ Cancels an in progress configuration change (update) or upgrade and
+ returns the deployment to its state before the upgrade or
+ configuration change was started.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ mode: Specifies whether the rollback should proceed automatically.
+ auto - The rollback proceeds without further user input.
+ manual - You must call the Walk Upgrade Domain operation to
+ apply the rollback to each upgrade domain.
+ force: Specifies whether the rollback should proceed even when it will
+ cause local data to be lost from some role instances. True if
+ the rollback should proceed; otherwise false if the rollback
+ should fail.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('mode', mode)
+ _validate_not_none('force', force)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/?comp=rollback',
+ _XmlSerializer.rollback_upgrade_to_xml(mode, force),
+ async=True)
+
+ def reboot_role_instance(self, service_name, deployment_name, role_instance_name):
+ '''
+ Requests a reboot of a role instance that is running in a deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ role_instance_name: The name of the role instance.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_instance_name', role_instance_name)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/roleinstances/' + str(role_instance_name) + '?comp=reboot',
+ '',
+ async=True)
+
+ def reimage_role_instance(self, service_name, deployment_name, role_instance_name):
+ '''
+ Requests a reimage of a role instance that is running in a deployment.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name of the deployment.
+ role_instance_name: The name of the role instance.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_instance_name', role_instance_name)
+ return self._perform_post(self._get_deployment_path_using_name(service_name, deployment_name) + '/roleinstances/' + str(role_instance_name) + '?comp=reimage',
+ '',
+ async=True)
+
+ def check_hosted_service_name_availability(self, service_name):
+ '''
+ Checks to see if the specified hosted service name is available, or if
+ it has already been taken.
+
+ service_name: Name of the hosted service.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get('/' + self.subscription_id + '/services/hostedservices/operations/isavailable/' + str(service_name) + '',
+ AvailabilityResponse)
+
+ #--Operations for service certificates -------------------------------
+ def list_service_certificates(self, service_name):
+ '''
+ Lists all of the service certificates associated with the specified
+ hosted service.
+
+ service_name: Name of the hosted service.
+ '''
+ _validate_not_none('service_name', service_name)
+ return self._perform_get('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates',
+ Certificates)
+
+ def get_service_certificate(self, service_name, thumbalgorithm, thumbprint):
+ '''
+ Returns the public data for the specified X.509 certificate associated
+ with a hosted service.
+
+ service_name: Name of the hosted service.
+ thumbalgorithm: The algorithm for the certificate's thumbprint.
+ thumbprint: The hexadecimal representation of the thumbprint.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('thumbalgorithm', thumbalgorithm)
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_get('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates/' + str(thumbalgorithm) + '-' + str(thumbprint) + '',
+ Certificate)
+
+ def add_service_certificate(self, service_name, data, certificate_format, password):
+ '''
+ Adds a certificate to a hosted service.
+
+ service_name: Name of the hosted service.
+ data: The base-64 encoded form of the pfx file.
+ certificate_format: The service certificate format. The only supported
+ value is pfx.
+ password: The certificate password.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('data', data)
+ _validate_not_none('certificate_format', certificate_format)
+ _validate_not_none('password', password)
+ return self._perform_post('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates',
+ _XmlSerializer.certificate_file_to_xml(data, certificate_format, password),
+ async=True)
+
+ def delete_service_certificate(self, service_name, thumbalgorithm, thumbprint):
+ '''
+ Deletes a service certificate from the certificate store of a hosted
+ service.
+
+ service_name: Name of the hosted service.
+ thumbalgorithm: The algorithm for the certificate's thumbprint.
+ thumbprint: The hexadecimal representation of the thumbprint.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('thumbalgorithm', thumbalgorithm)
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_delete('/' + self.subscription_id + '/services/hostedservices/' + str(service_name) + '/certificates/' + str(thumbalgorithm) + '-' + str(thumbprint),
+ async=True)
+
+ #--Operations for management certificates ----------------------------
+ def list_management_certificates(self):
+ '''
+ The List Management Certificates operation lists and returns basic
+ information about all of the management certificates associated with
+ the specified subscription. Management certificates, which are also
+ known as subscription certificates, authenticate clients attempting to
+ connect to resources associated with your Windows Azure subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/certificates',
+ SubscriptionCertificates)
+
+ def get_management_certificate(self, thumbprint):
+ '''
+ The Get Management Certificate operation retrieves information about
+ the management certificate with the specified thumbprint. Management
+ certificates, which are also known as subscription certificates,
+ authenticate clients attempting to connect to resources associated
+ with your Windows Azure subscription.
+
+ thumbprint: The thumbprint value of the certificate.
+ '''
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_get('/' + self.subscription_id + '/certificates/' + str(thumbprint),
+ SubscriptionCertificate)
+
+ def add_management_certificate(self, public_key, thumbprint, data):
+ '''
+ The Add Management Certificate operation adds a certificate to the
+ list of management certificates. Management certificates, which are
+ also known as subscription certificates, authenticate clients
+ attempting to connect to resources associated with your Windows Azure
+ subscription.
+
+ public_key: A base64 representation of the management certificate
+ public key.
+ thumbprint: The thumb print that uniquely identifies the management
+ certificate.
+        data: The certificate's raw data in base-64 encoded .cer format.
+ '''
+ _validate_not_none('public_key', public_key)
+ _validate_not_none('thumbprint', thumbprint)
+ _validate_not_none('data', data)
+ return self._perform_post('/' + self.subscription_id + '/certificates',
+ _XmlSerializer.subscription_certificate_to_xml(public_key, thumbprint, data))
+
+ def delete_management_certificate(self, thumbprint):
+ '''
+ The Delete Management Certificate operation deletes a certificate from
+ the list of management certificates. Management certificates, which
+ are also known as subscription certificates, authenticate clients
+ attempting to connect to resources associated with your Windows Azure
+ subscription.
+
+ thumbprint: The thumb print that uniquely identifies the management certificate.
+ '''
+ _validate_not_none('thumbprint', thumbprint)
+ return self._perform_delete('/' + self.subscription_id + '/certificates/' + str(thumbprint))
+
+ #--Operations for affinity groups ------------------------------------
+ def list_affinity_groups(self):
+ '''
+ Lists the affinity groups associated with the specified subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/affinitygroups',
+ AffinityGroups)
+
+ def get_affinity_group_properties(self, affinity_group_name):
+ '''
+ Returns the system properties associated with the specified affinity
+ group.
+
+ affinity_group_name: The name of the affinity group.
+ '''
+ _validate_not_none('affinity_group_name', affinity_group_name)
+ return self._perform_get('/' + self.subscription_id + '/affinitygroups/' + str(affinity_group_name) + '',
+ AffinityGroup)
+
+ def create_affinity_group(self, name, label, location, description=None):
+ '''
+ Creates a new affinity group for the specified subscription.
+
+ name: A name for the affinity group that is unique to the subscription.
+ label: A base-64 encoded name for the affinity group. The name can be
+ up to 100 characters in length.
+ location: The data center location where the affinity group will be
+                  created. To list available locations, use the list_locations
+ function.
+ description: A description for the affinity group. The description can
+ be up to 1024 characters in length.
+ '''
+ _validate_not_none('name', name)
+ _validate_not_none('label', label)
+ _validate_not_none('location', location)
+ return self._perform_post('/' + self.subscription_id + '/affinitygroups',
+ _XmlSerializer.create_affinity_group_to_xml(name, label, description, location))
+
+ def update_affinity_group(self, affinity_group_name, label, description=None):
+ '''
+ Updates the label and/or the description for an affinity group for the
+ specified subscription.
+
+ affinity_group_name: The name of the affinity group.
+        label: A name for the affinity group specified as a base-64 encoded string.
+ The label can be up to 100 characters in length.
+ description: A description for the affinity group. The description can
+ be up to 1024 characters in length.
+ '''
+ _validate_not_none('affinity_group_name', affinity_group_name)
+ _validate_not_none('label', label)
+ return self._perform_put('/' + self.subscription_id + '/affinitygroups/' + str(affinity_group_name),
+ _XmlSerializer.update_affinity_group_to_xml(label, description))
+
+ def delete_affinity_group(self, affinity_group_name):
+ '''
+ Deletes an affinity group in the specified subscription.
+
+ affinity_group_name: The name of the affinity group.
+ '''
+ _validate_not_none('affinity_group_name', affinity_group_name)
+ return self._perform_delete('/' + self.subscription_id + '/affinitygroups/' + str(affinity_group_name))
+
+ #--Operations for locations ------------------------------------------
+ def list_locations(self):
+ '''
+ Lists all of the data center locations that are valid for your
+ subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/locations',
+ Locations)
+
+ #--Operations for tracking asynchronous requests ---------------------
+ def get_operation_status(self, request_id):
+ '''
+ Returns the status of the specified operation. After calling an
+ asynchronous operation, you can call Get Operation Status to determine
+ whether the operation has succeeded, failed, or is still in progress.
+
+ request_id: The request ID for the request you wish to track.
+ '''
+ _validate_not_none('request_id', request_id)
+ return self._perform_get('/' + self.subscription_id + '/operations/' + str(request_id),
+ Operation)
+
+ #--Operations for retrieving operating system information ------------
+ def list_operating_systems(self):
+ '''
+ Lists the versions of the guest operating system that are currently
+ available in Windows Azure.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/operatingsystems',
+ OperatingSystems)
+
+ def list_operating_system_families(self):
+ '''
+ Lists the guest operating system families available in Windows Azure,
+ and also lists the operating system versions available for each family.
+ '''
+ return self._perform_get('/' + self.subscription_id + '/operatingsystemfamilies',
+ OperatingSystemFamilies)
+
+ #--Operations for retrieving subscription history --------------------
+ def get_subscription(self):
+ '''
+ Returns account and resource allocation information on the specified
+ subscription.
+ '''
+ return self._perform_get('/' + self.subscription_id + '',
+ Subscription)
+
+ #--Operations for virtual machines -----------------------------------
+ def get_role(self, service_name, deployment_name, role_name):
+ '''
+ Retrieves the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_get(self._get_role_path(service_name, deployment_name, role_name),
+ PersistentVMRole)
+
+ def create_virtual_machine_deployment(self, service_name, deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
+ '''
+ Provisions a virtual machine based on the supplied configuration.
+
+ service_name: Name of the hosted service.
+ deployment_name: The name for the deployment. The deployment name must
+ be unique among other deployments for the hosted
+ service.
+ deployment_slot: The environment to which the hosted service is
+ deployed. Valid values are: staging, production
+ label: A name for the hosted service that is base-64 encoded. The name
+ can be up to 100 characters in length. It is recommended that
+ the label be unique within the subscription. The name can be
+               used to identify the hosted service for your tracking purposes.
+ role_name: The name of the role.
+ system_config: Contains the metadata required to provision a virtual
+ machine from a Windows or Linux OS image. Use an
+ instance of WindowsConfigurationSet or
+ LinuxConfigurationSet.
+ os_virtual_hard_disk: Contains the parameters Windows Azure uses to
+ create the operating system disk for the virtual
+ machine.
+ network_config: Encapsulates the metadata required to create the
+ virtual network configuration for a virtual machine.
+ If you do not include a network configuration set you
+ will not be able to access the VM through VIPs over
+ the internet. If your virtual machine belongs to a
+ virtual network you can not specify which subnet
+ address space it resides under.
+ availability_set_name: Specifies the name of an availability set to
+ which to add the virtual machine. This value
+ controls the virtual machine allocation in the
+ Windows Azure environment. Virtual machines
+ specified in the same availability set are
+ allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks: Contains the parameters Windows Azure uses to
+ create a data disk for a virtual machine.
+ role_size: The size of the virtual machine to allocate. The default
+ value is Small. Possible values are: ExtraSmall, Small,
+ Medium, Large, ExtraLarge. The specified value must be
+ compatible with the disk selected in the OSVirtualHardDisk
+ values.
+ role_type: The type of the role for the virtual machine. The only
+ supported value is PersistentVMRole.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('deployment_slot', deployment_slot)
+ _validate_not_none('label', label)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('system_config', system_config)
+ _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)
+ return self._perform_post(self._get_deployment_path_using_name(service_name),
+ _XmlSerializer.virtual_machine_deployment_to_xml(deployment_name, deployment_slot, label, role_name, system_config, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size),
+ async=True)
+
+ def add_role(self, service_name, deployment_name, role_name, system_config, os_virtual_hard_disk, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
+ '''
+ Adds a virtual machine to an existing deployment.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ system_config: Contains the metadata required to provision a virtual
+ machine from a Windows or Linux OS image. Use an
+ instance of WindowsConfigurationSet or
+ LinuxConfigurationSet.
+ os_virtual_hard_disk: Contains the parameters Windows Azure uses to
+ create the operating system disk for the virtual
+ machine.
+ network_config: Encapsulates the metadata required to create the
+ virtual network configuration for a virtual machine.
+ If you do not include a network configuration set you
+ will not be able to access the VM through VIPs over
+ the internet. If your virtual machine belongs to a
+ virtual network you can not specify which subnet
+ address space it resides under.
+ availability_set_name: Specifies the name of an availability set to
+ which to add the virtual machine. This value
+ controls the virtual machine allocation in the
+ Windows Azure environment. Virtual machines
+ specified in the same availability set are
+ allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks: Contains the parameters Windows Azure uses to
+ create a data disk for a virtual machine.
+ role_size: The size of the virtual machine to allocate. The default
+ value is Small. Possible values are: ExtraSmall, Small,
+ Medium, Large, ExtraLarge. The specified value must be
+ compatible with the disk selected in the OSVirtualHardDisk
+ values.
+ role_type: The type of the role for the virtual machine. The only
+ supported value is PersistentVMRole.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('system_config', system_config)
+ _validate_not_none('os_virtual_hard_disk', os_virtual_hard_disk)
+ return self._perform_post(self._get_role_path(service_name, deployment_name),
+ _XmlSerializer.add_role_to_xml(role_name, system_config, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size),
+ async=True)
+
+ def update_role(self, service_name, deployment_name, role_name, os_virtual_hard_disk=None, network_config=None, availability_set_name=None, data_virtual_hard_disks=None, role_size=None, role_type='PersistentVMRole'):
+ '''
+ Updates the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ os_virtual_hard_disk: Contains the parameters Windows Azure uses to
+ create the operating system disk for the virtual
+ machine.
+ network_config: Encapsulates the metadata required to create the
+ virtual network configuration for a virtual machine.
+ If you do not include a network configuration set you
+ will not be able to access the VM through VIPs over
+ the internet. If your virtual machine belongs to a
+ virtual network you can not specify which subnet
+ address space it resides under.
+ availability_set_name: Specifies the name of an availability set to
+ which to add the virtual machine. This value
+ controls the virtual machine allocation in the
+ Windows Azure environment. Virtual machines
+ specified in the same availability set are
+ allocated to different nodes to maximize
+ availability.
+ data_virtual_hard_disks: Contains the parameters Windows Azure uses to
+ create a data disk for a virtual machine.
+ role_size: The size of the virtual machine to allocate. The default
+ value is Small. Possible values are: ExtraSmall, Small,
+ Medium, Large, ExtraLarge. The specified value must be
+ compatible with the disk selected in the OSVirtualHardDisk
+ values.
+ role_type: The type of the role for the virtual machine. The only
+ supported value is PersistentVMRole.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_put(self._get_role_path(service_name, deployment_name, role_name),
+ _XmlSerializer.update_role_to_xml(role_name, os_virtual_hard_disk, role_type, network_config, availability_set_name, data_virtual_hard_disks, role_size),
+ async=True)
+
+ def delete_role(self, service_name, deployment_name, role_name):
+ '''
+ Deletes the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_delete(self._get_role_path(service_name, deployment_name, role_name),
+ async=True)
+
+ def capture_role(self, service_name, deployment_name, role_name, post_capture_action, target_image_name, target_image_label, provisioning_configuration=None):
+ '''
+ The Capture Role operation captures a virtual machine image to your
+ image gallery. From the captured image, you can create additional
+ customized virtual machines.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ post_capture_action: Specifies the action after capture operation
+ completes. Possible values are: Delete,
+ Reprovision.
+ target_image_name: Specifies the image name of the captured virtual
+ machine.
+ target_image_label: Specifies the friendly name of the captured
+ virtual machine.
+ provisioning_configuration: Use an instance of WindowsConfigurationSet
+ or LinuxConfigurationSet.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('post_capture_action', post_capture_action)
+ _validate_not_none('target_image_name', target_image_name)
+ _validate_not_none('target_image_label', target_image_label)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.capture_role_to_xml(post_capture_action, target_image_name, target_image_label, provisioning_configuration),
+ async=True)
+
+ def start_role(self, service_name, deployment_name, role_name):
+ '''
+ Starts the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.start_role_operation_to_xml(),
+ async=True)
+
+ def restart_role(self, service_name, deployment_name, role_name):
+ '''
+ Restarts the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.restart_role_operation_to_xml(),
+ async=True)
+
+ def shutdown_role(self, service_name, deployment_name, role_name):
+ '''
+ Shuts down the specified virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ return self._perform_post(self._get_role_instance_operations_path(service_name, deployment_name, role_name),
+ _XmlSerializer.shutdown_role_operation_to_xml(),
+ async=True)
+
#--Operations for virtual machine images -----------------------------
def list_os_images(self):
    '''
    Returns the list of OS images available in the image repository.
    '''
    return self._perform_get(self._get_image_path(), Images)
+
def get_os_image(self, image_name):
    '''
    Fetches a single OS image from the image repository.
    '''
    return self._perform_get(self._get_image_path(image_name), OSImage)
+
+ def add_os_image(self, label, media_link, name, os):
+ '''
+ Adds an OS image that is currently stored in a storage account in your
+ subscription to the image repository.
+
+ label: Specifies the friendly name of the image.
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the image is located. The blob
+ location must belong to a storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name: Specifies a name for the OS image that Windows Azure uses to
+ identify the image when creating one or more virtual machines.
+ os: The operating system type of the OS image. Possible values are:
+ Linux, Windows
+ '''
+ _validate_not_none('label', label)
+ _validate_not_none('media_link', media_link)
+ _validate_not_none('name', name)
+ _validate_not_none('os', os)
+ return self._perform_post(self._get_image_path(),
+ _XmlSerializer.os_image_to_xml(label, media_link, name, os),
+ async=True)
+
+ def update_os_image(self, image_name, label, media_link, name, os):
+ '''
+ Updates an OS image that in your image repository.
+
+ image_name: The name of the image to update.
+ label: Specifies the friendly name of the image to be updated. You
+ cannot use this operation to update images provided by the
+ Windows Azure platform.
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the image is located. The blob
+ location must belong to a storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ name: Specifies a name for the OS image that Windows Azure uses to
+ identify the image when creating one or more VM Roles.
+ os: The operating system type of the OS image. Possible values are:
+ Linux, Windows
+ '''
+ _validate_not_none('image_name', image_name)
+ _validate_not_none('label', label)
+ _validate_not_none('media_link', media_link)
+ _validate_not_none('name', name)
+ _validate_not_none('os', os)
+ return self._perform_put(self._get_image_path(image_name),
+ _XmlSerializer.os_image_to_xml(label, media_link, name, os),
+ async=True)
+
+ def delete_os_image(self, image_name):
+ '''
+ Deletes the specified OS image from your image repository.
+
+ image_name: The name of the image.
+ '''
+ _validate_not_none('image_name', image_name)
+ return self._perform_delete(self._get_image_path(image_name),
+ async=True)
+
#--Operations for virtual machine disks ------------------------------
def get_data_disk(self, service_name, deployment_name, role_name, lun):
    '''
    Retrieves the specified data disk from a virtual machine.

    service_name: The name of the service.
    deployment_name: The name of the deployment.
    role_name: The name of the role.
    lun: The Logical Unit Number (LUN) for the disk.
    '''
    for arg_name, arg in (('service_name', service_name),
                          ('deployment_name', deployment_name),
                          ('role_name', role_name),
                          ('lun', lun)):
        _validate_not_none(arg_name, arg)
    path = self._get_data_disk_path(service_name, deployment_name,
                                    role_name, lun)
    return self._perform_get(path, DataVirtualHardDisk)
+
+ def add_data_disk(self, service_name, deployment_name, role_name, lun, host_caching=None, media_link=None, disk_label=None, disk_name=None, logical_disk_size_in_gb=None, source_media_link=None):
+ '''
+ Adds a data disk to a virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN
+ specifies the slot in which the data drive appears when mounted
+ for usage by the virtual machine. Valid LUN values are 0 through
+ 15.
+ host_caching: Specifies the platform caching behavior of data disk
+ blob for read/write efficiency. The default vault is
+ ReadOnly. Possible values are: None, ReadOnly, ReadWrite
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the disk is located. The blob
+ location must belong to the storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ disk_label: Specifies the description of the data disk. When you
+ attach a disk, either by directly referencing a media
+ using the MediaLink element or specifying the target disk
+ size, you can use the DiskLabel element to customize the
+ name property of the target data disk.
+ disk_name: Specifies the name of the disk. Windows Azure uses the
+ specified disk to create the data disk for the machine and
+ populates this field with the disk name.
+ logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk
+ to be attached to the role. The disk can be
+ created as part of disk attach or create VM
+ role call by specifying the value for this
+ property. Windows Azure creates the empty
+ disk based on size preference and attaches
+ the newly created disk to the Role.
+ source_media_link: Specifies the location of a blob in account storage
+ which is mounted as a data disk when the virtual
+ machine is created.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('lun', lun)
+ return self._perform_post(self._get_data_disk_path(service_name, deployment_name, role_name),
+ _XmlSerializer.data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, logical_disk_size_in_gb, media_link, source_media_link),
+ async=True)
+
+ def update_data_disk(self, service_name, deployment_name, role_name, lun, host_caching=None, media_link=None, updated_lun=None, disk_label=None, disk_name=None, logical_disk_size_in_gb=None):
+ '''
+ Updates the specified data disk attached to the specified virtual
+ machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ lun: Specifies the Logical Unit Number (LUN) for the disk. The LUN
+ specifies the slot in which the data drive appears when mounted
+ for usage by the virtual machine. Valid LUN values are 0 through
+ 15.
+ host_caching: Specifies the platform caching behavior of data disk
+ blob for read/write efficiency. The default vault is
+ ReadOnly. Possible values are: None, ReadOnly, ReadWrite
+ media_link: Specifies the location of the blob in Windows Azure blob
+ store where the media for the disk is located. The blob
+ location must belong to the storage account in the
+ subscription specified by the value in
+ the operation call. Example:
+ http://example.blob.core.windows.net/disks/mydisk.vhd
+ updated_lun: Specifies the Logical Unit Number (LUN) for the disk. The
+ LUN specifies the slot in which the data drive appears
+ when mounted for usage by the virtual machine. Valid LUN
+ values are 0 through 15.
+ disk_label: Specifies the description of the data disk. When you
+ attach a disk, either by directly referencing a media
+ using the MediaLink element or specifying the target disk
+ size, you can use the DiskLabel element to customize the
+ name property of the target data disk.
+ disk_name: Specifies the name of the disk. Windows Azure uses the
+ specified disk to create the data disk for the machine and
+ populates this field with the disk name.
+ logical_disk_size_in_gb: Specifies the size, in GB, of an empty disk
+ to be attached to the role. The disk can be
+ created as part of disk attach or create VM
+ role call by specifying the value for this
+ property. Windows Azure creates the empty
+ disk based on size preference and attaches
+ the newly created disk to the Role.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('lun', lun)
+ return self._perform_put(self._get_data_disk_path(service_name, deployment_name, role_name, lun),
+ _XmlSerializer.data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, updated_lun, logical_disk_size_in_gb, media_link, None),
+ async=True)
+
+ def delete_data_disk(self, service_name, deployment_name, role_name, lun):
+ '''
+ Removes the specified data disk from a virtual machine.
+
+ service_name: The name of the service.
+ deployment_name: The name of the deployment.
+ role_name: The name of the role.
+ lun: The Logical Unit Number (LUN) for the disk.
+ '''
+ _validate_not_none('service_name', service_name)
+ _validate_not_none('deployment_name', deployment_name)
+ _validate_not_none('role_name', role_name)
+ _validate_not_none('lun', lun)
+ return self._perform_delete(self._get_data_disk_path(service_name, deployment_name, role_name, lun),
+ async=True)
+
#--Operations for disks in the image repository ----------------------
def list_disks(self):
    '''
    Returns the list of disks held in your image repository.
    '''
    return self._perform_get(self._get_disk_path(), Disks)
+
def get_disk(self, disk_name):
    '''
    Fetches a single disk from your image repository.
    '''
    return self._perform_get(self._get_disk_path(disk_name), Disk)
+
def add_disk(self, has_operating_system, label, media_link, name, os):
    '''
    Registers a disk — either an OS disk or a data disk — with the user
    image repository.

    has_operating_system: Whether the disk contains an operating system.
                          Only a disk with an operating system installed
                          can be mounted as an OS Drive.
    label: Description of the disk.
    media_link: Location of the blob in Windows Azure blob store where
                the media for the disk is located. The blob location
                must belong to a storage account in the current
                subscription. Example:
                http://example.blob.core.windows.net/disks/mydisk.vhd
    name: Name Windows Azure uses to identify the disk when creating
          virtual machines from the disk.
    os: The OS type of the disk. Possible values are: Linux, Windows
    '''
    _validate_not_none('has_operating_system', has_operating_system)
    _validate_not_none('label', label)
    _validate_not_none('media_link', media_link)
    _validate_not_none('name', name)
    _validate_not_none('os', os)
    body = _XmlSerializer.disk_to_xml(has_operating_system, label,
                                      media_link, name, os)
    return self._perform_post(self._get_disk_path(), body)
+
def update_disk(self, disk_name, has_operating_system, label, media_link, name, os):
    '''
    Updates an existing disk in your image repository.

    disk_name: The name of the disk to update.
    has_operating_system: Whether the disk contains an operating system.
                          Only a disk with an operating system installed
                          can be mounted as an OS Drive.
    label: Description of the disk.
    media_link: Location of the blob in Windows Azure blob store where
                the media for the disk is located. The blob location
                must belong to a storage account in the current
                subscription. Example:
                http://example.blob.core.windows.net/disks/mydisk.vhd
    name: Name Windows Azure uses to identify the disk when creating
          virtual machines from the disk.
    os: The OS type of the disk. Possible values are: Linux, Windows
    '''
    _validate_not_none('disk_name', disk_name)
    _validate_not_none('has_operating_system', has_operating_system)
    _validate_not_none('label', label)
    _validate_not_none('media_link', media_link)
    _validate_not_none('name', name)
    _validate_not_none('os', os)
    body = _XmlSerializer.disk_to_xml(has_operating_system, label,
                                      media_link, name, os)
    return self._perform_put(self._get_disk_path(disk_name), body)
+
def delete_disk(self, disk_name):
    '''
    Deletes the specified data or operating system disk from your image
    repository.

    disk_name: The name of the disk to delete.
    '''
    _validate_not_none('disk_name', disk_name)
    path = self._get_disk_path(disk_name)
    return self._perform_delete(path)
+
#--Helper functions --------------------------------------------------
def _perform_request(self, request):
    '''
    Sends the request through the filter chain; an HTTPError raised by
    the filter is handed to the management error handler.
    '''
    try:
        return self._filter(request)
    except HTTPError as e:
        return _management_error_handler(e)
+
def _perform_get(self, path, response_type):
    '''
    Issues a GET against the management endpoint and parses the response
    body into response_type.
    '''
    req = HTTPRequest()
    req.method = 'GET'
    req.host = self.host
    req.path = path
    req.path, req.query = _update_request_uri_query(req)
    req.headers = _update_management_header(req)
    return _parse_response(self._perform_request(req), response_type)
+
+ def _perform_put(self, path, body, async=False):
+ request = HTTPRequest()
+ request.method = 'PUT'
+ request.host = self.host
+ request.path = path
+ request.body = _get_request_body(body)
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _perform_post(self, path, body, response_type=None, async=False):
+ request = HTTPRequest()
+ request.method = 'POST'
+ request.host = self.host
+ request.path = path
+ request.body = _get_request_body(body)
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if response_type is not None:
+ return _parse_response(response, response_type)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _perform_delete(self, path, async=False):
+ request = HTTPRequest()
+ request.method = 'DELETE'
+ request.host = self.host
+ request.path = path
+ request.path, request.query = _update_request_uri_query(request)
+ request.headers = _update_management_header(request)
+ response = self._perform_request(request)
+
+ if async:
+ return _parse_response_for_async_op(response)
+
+ return None
+
+ def _get_path(self, resource, name):
+ path = '/' + self.subscription_id + '/' + resource
+ if name is not None:
+ path += '/' + str(name)
+ return path
+
+ def _get_storage_service_path(self, service_name=None):
+ return self._get_path('services/storageservices', service_name)
+
+ def _get_hosted_service_path(self, service_name=None):
+ return self._get_path('services/hostedservices', service_name)
+
+ def _get_deployment_path_using_slot(self, service_name, slot=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deploymentslots', slot)
+
+ def _get_deployment_path_using_name(self, service_name, deployment_name=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments', deployment_name)
+
+ def _get_role_path(self, service_name, deployment_name, role_name=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments/' + deployment_name + '/roles', role_name)
+
+ def _get_role_instance_operations_path(self, service_name, deployment_name, role_name=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments/' + deployment_name + '/roleinstances', role_name) + '/Operations'
+
+ def _get_data_disk_path(self, service_name, deployment_name, role_name, lun=None):
+ return self._get_path('services/hostedservices/' + str(service_name) + '/deployments/' + str(deployment_name) + '/roles/' + str(role_name) + '/DataDisks', lun)
+
+ def _get_disk_path(self, disk_name=None):
+ return self._get_path('services/disks', disk_name)
+
+ def _get_image_path(self, image_name=None):
+ return self._get_path('services/images', image_name)
diff --git a/src/azure/storage/__init__.py b/src/azure/storage/__init__.py
index bbb6847b8182..a82660709323 100644
--- a/src/azure/storage/__init__.py
+++ b/src/azure/storage/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,10 +21,10 @@
import types
from datetime import datetime
-from azure import (_create_entry,
- _get_entry_properties, _html_encode, WindowsAzureError,
+from azure import (_create_entry, METADATA_NS, _parse_response_for_dict,
+ _get_entry_properties, WindowsAzureError,
_get_child_nodes, _get_child_nodesNS,
- WindowsAzureConflictError,
+ WindowsAzureConflictError, _general_error_handler,
WindowsAzureMissingResourceError, _list_of,
DEV_TABLE_HOST, TABLE_SERVICE_HOST_BASE, DEV_BLOB_HOST,
BLOB_SERVICE_HOST_BASE, DEV_QUEUE_HOST,
@@ -51,10 +51,13 @@ class ContainerEnumResults(EnumResultsBase):
def __init__(self):
EnumResultsBase.__init__(self)
self.containers = _list_of(Container)
+
def __iter__(self):
return iter(self.containers)
+
def __len__(self):
return len(self.containers)
+
def __getitem__(self, index):
return self.containers[index]
@@ -65,7 +68,7 @@ def __init__(self):
self.name = ''
self.url = ''
self.properties = Properties()
- self.metadata = Metadata()
+ self.metadata = {}
class Properties(WindowsAzureData):
''' Blob container's properties class. '''
@@ -74,29 +77,20 @@ def __init__(self):
self.last_modified = ''
self.etag = ''
-class Metadata(WindowsAzureData):
- ''' Metadata class. '''
-
- def __init__(self):
- self.metadata_name = ''
-
class RetentionPolicy(WindowsAzureData):
''' RetentionPolicy in service properties. '''
+
def __init__(self):
self.enabled = False
self.__dict__['days'] = None
- def get_days(self):
-
+ def get_days(self):
#convert days to int value
return int(self.__dict__['days'])
def set_days(self, value):
''' set default days if days is set to empty. '''
- if value == '':
- self.__dict__['days'] = 10
- else:
- self.__dict__['days'] = value
+ self.__dict__['days'] = value
days = property(fget=get_days, fset=set_days)
@@ -143,10 +137,18 @@ def __init__(self):
class SignedIdentifiers(WindowsAzureData):
''' SignedIdentifier list. '''
+
def __init__(self):
- self.signed_identifiers = _list_of(SignedIdentifier)
+ self.signed_identifiers = _list_of(SignedIdentifier)
+
def __iter__(self):
- return self.signed_identifiers
+ return iter(self.signed_identifiers)
+
+ def __len__(self):
+ return len(self.signed_identifiers)
+
+ def __getitem__(self, index):
+ return self.signed_identifiers[index]
class BlobEnumResults(EnumResultsBase):
''' Blob list.'''
@@ -154,13 +156,24 @@ class BlobEnumResults(EnumResultsBase):
def __init__(self):
EnumResultsBase.__init__(self)
self.blobs = _list_of(Blob)
+
def __iter__(self):
return iter(self.blobs)
+
def __len__(self):
return len(self.blobs)
+
def __getitem__(self, index):
return self.blobs[index]
+class BlobResult(str):
+
+ def __new__(cls, blob, properties):
+ return str.__new__(cls, blob)
+
+ def __init__(self, blob, properties):
+ self.properties = properties
+
class Blob(WindowsAzureData):
''' Blob class. '''
@@ -169,7 +182,7 @@ def __init__(self):
self.snapshot = ''
self.url = ''
self.properties = BlobProperties()
- self.metadata = Metadata()
+ self.metadata = {}
self.blob_prefix = BlobPrefix()
class BlobProperties(WindowsAzureData):
@@ -202,20 +215,14 @@ def __init__(self, id=None, size=None):
class BlobBlockList(WindowsAzureData):
''' BlobBlockList class '''
+
def __init__(self):
self.committed_blocks = []
self.uncommitted_blocks = []
-class BlockList(WindowsAzureData):
- ''' BlockList used to submit block list. '''
-
- def __init__(self):
- self.committed = []
- self.uncommitted = []
- self.latest = []
-
class PageRange(WindowsAzureData):
''' Page Range for page blob. '''
+
def __init__(self):
self.start = 0
self.end = 0
@@ -225,8 +232,15 @@ class PageList:
def __init__(self):
self.page_ranges = _list_of(PageRange)
+
def __iter__(self):
- return self.page_ranges
+ return iter(self.page_ranges)
+
+ def __len__(self):
+ return len(self.page_ranges)
+
+ def __getitem__(self, index):
+ return self.page_ranges[index]
class QueueEnumResults(EnumResultsBase):
''' Queue list'''
@@ -234,10 +248,13 @@ class QueueEnumResults(EnumResultsBase):
def __init__(self):
EnumResultsBase.__init__(self)
self.queues = _list_of(Queue)
+
def __iter__(self):
return iter(self.queues)
+
def __len__(self):
return len(self.queues)
+
def __getitem__(self, index):
return self.queues[index]
@@ -247,17 +264,20 @@ class Queue(WindowsAzureData):
def __init__(self):
self.name = ''
self.url = ''
- self.metadata = Metadata()
+ self.metadata = {}
class QueueMessagesList(WindowsAzureData):
''' Queue message list. '''
def __init__(self):
self.queue_messages = _list_of(QueueMessage)
+
def __iter__(self):
return iter(self.queue_messages)
+
def __len__(self):
return len(self.queue_messages)
+
def __getitem__(self, index):
return self.queue_messages[index]
@@ -273,17 +293,6 @@ def __init__(self):
self.dequeue_count = ''
self.message_text = ''
-class TableEnumResult(EnumResultsBase):
- def __init__():
- EnumResultsBase.__init__(self)
- self.tables = _list_of(Table)
- def __iter__(self):
- return iter(self.tables)
- def __len__(self):
- return len(self.tables)
- def __getitem__(self, index):
- return self.tables[index]
-
class Entity(WindowsAzureData):
''' Entity class. The attributes of entity will be created dynamically. '''
pass
@@ -430,15 +439,18 @@ def _sign_storage_table_request(request, account_name, account_key):
auth_string = 'SharedKey ' + account_name + ':' + base64.b64encode(signed_hmac_sha256.digest())
return auth_string
-
-
def _to_python_bool(value):
if value.lower() == 'true':
return True
return False
def _to_entity_int(data):
- return 'Edm.Int32', str(data)
+ int_max = (2 << 30) - 1
+ import sys
+ if data > (int_max) or data < (int_max + 1)*(-1):
+ return 'Edm.Int64', str(data)
+ else:
+ return 'Edm.Int32', str(data)
def _to_entity_bool(value):
if value:
@@ -469,7 +481,10 @@ def _from_entity_int(value):
return int(value)
def _from_entity_datetime(value):
- return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ if value.endswith('Z'):
+ return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ else:
+ return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
_ENTITY_TO_PYTHON_CONVERSIONS = {
'Edm.Int32': _from_entity_int,
@@ -542,9 +557,15 @@ def convert_entity_to_xml(source):
#form the property node
properties_str += ''.join(['', xml_escape(value), ''])
+ if value == '':
+ properties_str += ' m:null="true" />'
+ else:
+ if mtype:
+ properties_str += ''.join([' m:type="', mtype, '"'])
+ properties_str += ''.join(['>', xml_escape(value), ''])
+
+ if isinstance(properties_str, unicode):
+ properties_str = properties_str.encode(encoding='utf-8')
#generate the entity_body
entity_body = entity_body.format(properties=properties_str)
@@ -576,6 +597,10 @@ def convert_block_list_to_xml(block_id_list):
return xml+''
+def _create_blob_result(response):
+ blob_properties = _parse_response_for_dict(response)
+ return BlobResult(response.body, blob_properties)
+
def convert_response_to_block_list(response):
'''
Converts xml response to block list class.
@@ -601,8 +626,9 @@ def _remove_prefix(name):
return name[colon + 1:]
return name
-METADATA_NS = 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata'
def _convert_response_to_entity(response):
+ if response is None:
+ return response
return _convert_xml_to_entity(response.body)
def _convert_xml_to_entity(xmlstr):
@@ -644,7 +670,6 @@ def _convert_xml_to_entity(xmlstr):
return None
entity = Entity()
-
#extract each property node and get the type from attribute and node value
for xml_property in xml_properties[0].childNodes:
if xml_property.firstChild:
@@ -662,19 +687,24 @@ def _convert_xml_to_entity(xmlstr):
#if not isnull and no type info, then it is a string and we just need the str type to hold the property.
if not isnull and not mtype:
setattr(entity, name, value)
+ elif isnull == 'true':
+ if mtype:
+ property = EntityProperty(mtype, None)
+ else:
+ property = EntityProperty('Edm.String', None)
else: #need an object to hold the property
conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
if conv is not None:
property = conv(value)
else:
- property = EntityProperty()
- setattr(property, 'value', value)
- if isnull:
- property.isnull = str(isnull)
- if mtype:
- property.type = str(mtype)
+ property = EntityProperty(mtype, value)
setattr(entity, name, property)
+ #extract id, updated and name value from feed entry and set them of rule.
+ for name, value in _get_entry_properties(xmlstr, True).iteritems():
+ if name in ['etag']:
+ setattr(entity, name, value)
+
return entity
def _convert_xml_to_table(xmlstr):
@@ -690,12 +720,7 @@ def _convert_xml_to_table(xmlstr):
def _storage_error_handler(http_error):
''' Simple error handler for storage service. Will add more specific cases '''
- if http_error.status == 409:
- raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
- elif http_error.status == 404:
- raise WindowsAzureMissingResourceError(azure._ERROR_NOT_FOUND)
- else:
- raise WindowsAzureError(azure._ERROR_UNKNOWN % http_error.message)
+ return _general_error_handler(http_error)
# make these available just from storage.
from blobservice import BlobService
diff --git a/src/azure/storage/blobservice.py b/src/azure/storage/blobservice.py
index 28bbacc1f453..b79dfa420795 100644
--- a/src/azure/storage/blobservice.py
+++ b/src/azure/storage/blobservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,18 +18,17 @@
from azure.storage import *
from azure.storage.storageclient import _StorageClient
-from azure.storage import (_update_storage_blob_header,
+from azure.storage import (_update_storage_blob_header, _create_blob_result,
convert_block_list_to_xml, convert_response_to_block_list)
-from azure.http import HTTPRequest
+from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist,
+ _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class BlobService(_StorageClient):
@@ -39,6 +38,9 @@ class BlobService(_StorageClient):
account_key: your storage account key, required for all operations.
'''
+ def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = BLOB_SERVICE_HOST_BASE, dev_host = DEV_BLOB_HOST):
+ return super(BlobService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
+
def list_containers(self, prefix=None, marker=None, maxresults=None, include=None):
'''
The List Containers operation returns a list of the containers under the specified account.
@@ -49,11 +51,12 @@ def list_containers(self, prefix=None, marker=None, maxresults=None, include=Non
with the next list operation.
maxresults: Optional. Specifies the maximum number of containers to return.
include: Optional. Include this parameter to specify that the container's metadata be
- returned as part of the response body.
+ returned as part of the response body. set this parameter to string 'metadata' to
+ get container's metadata.
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
@@ -80,7 +83,7 @@ def create_container(self, container_name, x_ms_meta_name_values=None, x_ms_blob
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
@@ -106,7 +109,7 @@ def get_container_properties(self, container_name):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -122,7 +125,7 @@ def get_container_metadata(self, container_name):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -139,7 +142,7 @@ def set_container_metadata(self, container_name, x_ms_meta_name_values=None):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -153,7 +156,7 @@ def get_container_acl(self, container_name):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=acl'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -171,7 +174,7 @@ def set_container_acl(self, container_name, signed_identifiers=None, x_ms_blob_p
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=acl'
request.headers = [('x-ms-blob-public-access', _str_or_none(x_ms_blob_public_access))]
request.body = _get_request_body(_convert_class_to_xml(signed_identifiers))
@@ -188,7 +191,7 @@ def delete_container(self, container_name, fail_not_exist=False):
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
@@ -210,7 +213,7 @@ def list_blobs(self, container_name, prefix=None, marker=None, maxresults=None,
_validate_not_none('container_name', container_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '?restype=container&comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
@@ -237,7 +240,7 @@ def set_blob_service_properties(self, storage_service_properties, timeout=None):
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
@@ -255,7 +258,7 @@ def get_blob_service_properties(self, timeout=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -274,7 +277,7 @@ def get_blob_properties(self, container_name, blob_name, x_ms_lease_id=None):
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'HEAD'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -298,7 +301,7 @@ def set_blob_properties(self, container_name, blob_name, x_ms_blob_cache_control
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=properties'
request.headers = [
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
@@ -312,7 +315,7 @@ def set_blob_properties(self, container_name, blob_name, x_ms_blob_cache_control
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_encoding=None, content_language=None, content_m_d5=None, cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_blob_cache_control=None, x_ms_meta_name_values=None, x_ms_lease_id=None, x_ms_blob_content_length=None, x_ms_blob_sequence_number=None):
+ def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_encoding=None, content_language=None, content_md5=None, cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_blob_cache_control=None, x_ms_meta_name_values=None, x_ms_lease_id=None, x_ms_blob_content_length=None, x_ms_blob_sequence_number=None):
'''
Creates a new block blob or page blob, or updates the content of an existing block blob.
@@ -329,13 +332,13 @@ def put_blob(self, container_name, blob_name, blob, x_ms_blob_type, content_enco
_validate_not_none('x_ms_blob_type', x_ms_blob_type)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-blob-type', _str_or_none(x_ms_blob_type)),
('Content-Encoding', _str_or_none(content_encoding)),
('Content-Language', _str_or_none(content_language)),
- ('Content-MD5', _str_or_none(content_m_d5)),
+ ('Content-MD5', _str_or_none(content_md5)),
('Cache-Control', _str_or_none(cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding', _str_or_none(x_ms_blob_content_encoding)),
@@ -364,7 +367,7 @@ def get_blob(self, container_name, blob_name, snapshot=None, x_ms_range=None, x_
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
@@ -376,7 +379,7 @@ def get_blob(self, container_name, blob_name, snapshot=None, x_ms_range=None, x_
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- return response.body
+ return _create_blob_result(response)
def get_blob_metadata(self, container_name, blob_name, snapshot=None, x_ms_lease_id=None):
'''
@@ -389,7 +392,7 @@ def get_blob_metadata(self, container_name, blob_name, snapshot=None, x_ms_lease
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=metadata'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
@@ -411,7 +414,7 @@ def set_blob_metadata(self, container_name, blob_name, x_ms_meta_name_values=Non
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=metadata'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
@@ -435,7 +438,7 @@ def lease_blob(self, container_name, blob_name, x_ms_lease_action, x_ms_lease_id
_validate_not_none('x_ms_lease_action', x_ms_lease_action)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=lease'
request.headers = [
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
@@ -467,7 +470,7 @@ def snapshot_blob(self, container_name, blob_name, x_ms_meta_name_values=None, i
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=snapshot'
request.headers = [
('x-ms-meta-name-values', x_ms_meta_name_values),
@@ -481,6 +484,8 @@ def snapshot_blob(self, container_name, blob_name, x_ms_meta_name_values=None, i
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
+ return _parse_response_for_dict_filter(response, filter=['x-ms-snapshot', 'etag', 'last-modified'])
+
def copy_blob(self, container_name, blob_name, x_ms_copy_source, x_ms_meta_name_values=None, x_ms_source_if_modified_since=None, x_ms_source_if_unmodified_since=None, x_ms_source_if_match=None, x_ms_source_if_none_match=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None, x_ms_lease_id=None, x_ms_source_lease_id=None):
'''
Copies a blob to a destination within the storage account.
@@ -512,7 +517,7 @@ def copy_blob(self, container_name, blob_name, x_ms_copy_source, x_ms_meta_name_
_validate_not_none('x_ms_copy_source', x_ms_copy_source)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [
('x-ms-copy-source', _str_or_none(x_ms_copy_source)),
@@ -551,7 +556,7 @@ def delete_blob(self, container_name, blob_name, snapshot=None, x_ms_lease_id=No
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + ''
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [('snapshot', _str_or_none(snapshot))]
@@ -559,7 +564,7 @@ def delete_blob(self, container_name, blob_name, snapshot=None, x_ms_lease_id=No
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- def put_block(self, container_name, blob_name, block, blockid, content_m_d5=None, x_ms_lease_id=None):
+ def put_block(self, container_name, blob_name, block, blockid, content_md5=None, x_ms_lease_id=None):
'''
Creates a new block to be committed as part of a blob.
@@ -577,10 +582,10 @@ def put_block(self, container_name, blob_name, block, blockid, content_m_d5=None
_validate_not_none('blockid', blockid)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=block'
request.headers = [
- ('Content-MD5', _str_or_none(content_m_d5)),
+ ('Content-MD5', _str_or_none(content_md5)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id))
]
request.query = [('blockid', base64.b64encode(_str_or_none(blockid)))]
@@ -589,7 +594,7 @@ def put_block(self, container_name, blob_name, block, blockid, content_m_d5=None
request.headers = _update_storage_blob_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
- def put_block_list(self, container_name, blob_name, block_list, content_m_d5=None, x_ms_blob_cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_meta_name_values=None, x_ms_lease_id=None):
+ def put_block_list(self, container_name, blob_name, block_list, content_md5=None, x_ms_blob_cache_control=None, x_ms_blob_content_type=None, x_ms_blob_content_encoding=None, x_ms_blob_content_language=None, x_ms_blob_content_md5=None, x_ms_meta_name_values=None, x_ms_lease_id=None):
'''
Writes a blob by specifying the list of block IDs that make up the blob. In order to
be written as part of a blob, a block must have been successfully written to the server
@@ -621,10 +626,10 @@ def put_block_list(self, container_name, blob_name, block_list, content_m_d5=Non
_validate_not_none('block_list', block_list)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=blocklist'
request.headers = [
- ('Content-MD5', _str_or_none(content_m_d5)),
+ ('Content-MD5', _str_or_none(content_md5)),
('x-ms-blob-cache-control', _str_or_none(x_ms_blob_cache_control)),
('x-ms-blob-content-type', _str_or_none(x_ms_blob_content_type)),
('x-ms-blob-content-encoding', _str_or_none(x_ms_blob_content_encoding)),
@@ -653,7 +658,7 @@ def get_block_list(self, container_name, blob_name, snapshot=None, blocklisttype
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=blocklist'
request.headers = [('x-ms-lease-id', _str_or_none(x_ms_lease_id))]
request.query = [
@@ -666,7 +671,7 @@ def get_block_list(self, container_name, blob_name, snapshot=None, blocklisttype
return convert_response_to_block_list(response)
- def put_page(self, container_name, blob_name, page, x_ms_range, x_ms_page_write, timeout=None, content_m_d5=None, x_ms_lease_id=None, x_ms_if_sequence_number_lte=None, x_ms_if_sequence_number_lt=None, x_ms_if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None):
+ def put_page(self, container_name, blob_name, page, x_ms_range, x_ms_page_write, timeout=None, content_md5=None, x_ms_lease_id=None, x_ms_if_sequence_number_lte=None, x_ms_if_sequence_number_lt=None, x_ms_if_sequence_number_eq=None, if_modified_since=None, if_unmodified_since=None, if_match=None, if_none_match=None):
'''
Writes a range of pages to a page blob.
@@ -694,11 +699,11 @@ def put_page(self, container_name, blob_name, page, x_ms_range, x_ms_page_write,
_validate_not_none('x_ms_page_write', x_ms_page_write)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=page'
request.headers = [
('x-ms-range', _str_or_none(x_ms_range)),
- ('Content-MD5', _str_or_none(content_m_d5)),
+ ('Content-MD5', _str_or_none(content_md5)),
('x-ms-page-write', _str_or_none(x_ms_page_write)),
('x-ms-lease-id', _str_or_none(x_ms_lease_id)),
('x-ms-if-sequence-number-lte', _str_or_none(x_ms_if_sequence_number_lte)),
@@ -733,7 +738,7 @@ def get_page_ranges(self, container_name, blob_name, snapshot=None, range=None,
_validate_not_none('blob_name', blob_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_blob_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(container_name) + '/' + str(blob_name) + '?comp=pagelist'
request.headers = [
('Range', _str_or_none(range)),
diff --git a/src/azure/storage/cloudstorageaccount.py b/src/azure/storage/cloudstorageaccount.py
index 39ea96f9331f..ead0928ac99c 100644
--- a/src/azure/storage/cloudstorageaccount.py
+++ b/src/azure/storage/cloudstorageaccount.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/src/azure/storage/queueservice.py b/src/azure/storage/queueservice.py
index 602f71f7177a..778dcc776f0d 100644
--- a/src/azure/storage/queueservice.py
+++ b/src/azure/storage/queueservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,16 +19,15 @@
from azure.storage import *
from azure.storage.storageclient import _StorageClient
from azure.storage import (_update_storage_queue_header)
-from azure.http import HTTPRequest
+from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist,
+ _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class QueueService(_StorageClient):
@@ -38,6 +37,9 @@ class QueueService(_StorageClient):
account_key: your storage account key, required for all operations.
'''
+ def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = QUEUE_SERVICE_HOST_BASE, dev_host = DEV_QUEUE_HOST):
+ return super(QueueService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
+
def get_queue_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Queue Service, including Windows Azure
@@ -48,7 +50,7 @@ def get_queue_service_properties(self, timeout=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -63,7 +65,7 @@ def list_queues(self, prefix=None, marker=None, maxresults=None, include=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?comp=list'
request.query = [
('prefix', _str_or_none(prefix)),
@@ -89,20 +91,24 @@ def create_queue(self, queue_name, x_ms_meta_name_values=None, fail_on_exist=Fal
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
if not fail_on_exist:
try:
- self._perform_request(request)
+ response = self._perform_request(request)
+ if response.status == HTTP_RESPONSE_NO_CONTENT:
+ return False
return True
except WindowsAzureError as e:
_dont_fail_on_exist(e)
return False
else:
- self._perform_request(request)
+ response = self._perform_request(request)
+ if response.status == HTTP_RESPONSE_NO_CONTENT:
+ raise WindowsAzureConflictError(azure._ERROR_CONFLICT)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
@@ -115,7 +121,7 @@ def delete_queue(self, queue_name, fail_not_exist=False):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + ''
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
@@ -140,7 +146,7 @@ def get_queue_metadata(self, queue_name):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '?comp=metadata'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
@@ -160,7 +166,7 @@ def set_queue_metadata(self, queue_name, x_ms_meta_name_values=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '?comp=metadata'
request.headers = [('x-ms-meta-name-values', x_ms_meta_name_values)]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -186,7 +192,7 @@ def put_message(self, queue_name, message_text, visibilitytimeout=None, messaget
_validate_not_none('message_text', message_text)
request = HTTPRequest()
request.method = 'POST'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.query = [
('visibilitytimeout', _str_or_none(visibilitytimeout)),
@@ -218,7 +224,7 @@ def get_messages(self, queue_name, numofmessages=None, visibilitytimeout=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.query = [
('numofmessages', _str_or_none(numofmessages)),
@@ -243,7 +249,7 @@ def peek_messages(self, queue_name, numofmessages=None):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages?peekonly=true'
request.query = [('numofmessages', _str_or_none(numofmessages))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -265,7 +271,7 @@ def delete_message(self, queue_name, message_id, popreceipt):
_validate_not_none('popreceipt', popreceipt)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(message_id) + ''
request.query = [('popreceipt', _str_or_none(popreceipt))]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -281,7 +287,7 @@ def clear_messages(self, queue_name):
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
@@ -308,7 +314,7 @@ def update_message(self, queue_name, message_id, message_text, popreceipt, visib
_validate_not_none('visibilitytimeout', visibilitytimeout)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(queue_name) + '/messages/' + str(message_id) + ''
request.query = [
('popreceipt', _str_or_none(popreceipt)),
@@ -335,12 +341,10 @@ def set_queue_service_properties(self, storage_service_properties, timeout=None)
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_queue_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.query = [('timeout', _int_or_none(timeout))]
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_queue_header(request, self.account_name, self.account_key)
response = self._perform_request(request)
-
-
diff --git a/src/azure/storage/sharedaccesssignature.py b/src/azure/storage/sharedaccesssignature.py
index a7850702fa5c..c80ce63ba59d 100644
--- a/src/azure/storage/sharedaccesssignature.py
+++ b/src/azure/storage/sharedaccesssignature.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/src/azure/storage/storageclient.py b/src/azure/storage/storageclient.py
index 15ff95378a52..1bb259c391e8 100644
--- a/src/azure/storage/storageclient.py
+++ b/src/azure/storage/storageclient.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,7 +18,8 @@
import hashlib
import os
-from azure.storage import _storage_error_handler, X_MS_VERSION
+
+from azure.storage import _storage_error_handler
from azure.http.httpclient import _HTTPClient
from azure.http import HTTPError
from azure import (_parse_response, WindowsAzureError,
@@ -37,11 +38,20 @@ class _StorageClient(object):
This is the base class for BlobManager, TableManager and QueueManager.
'''
- def __init__(self, account_name=None, account_key=None, protocol='http'):
- self.account_name = account_name
- self.account_key = account_key
+ def __init__(self, account_name=None, account_key=None, protocol='http', host_base='', dev_host=''):
+ if account_name is not None:
+ self.account_name = account_name.encode('ascii', 'ignore')
+ else:
+ self.account_name = None
+ if account_key is not None:
+ self.account_key = account_key.encode('ascii', 'ignore')
+ else:
+ self.account_key = None
+
self.requestid = None
self.protocol = protocol
+ self.host_base = host_base
+ self.dev_host = dev_host
#the app is not run in azure emulator or use default development
#storage account and key if app is run in emulator.
@@ -60,7 +70,7 @@ def __init__(self, account_name=None, account_key=None, protocol='http'):
#get the account and key from environment variables if the app is not run
#in azure emulator or use default development storage account and key if
#app is run in emulator.
- if not account_name or not account_key:
+ if not self.account_name or not self.account_key:
if self.is_emulated:
self.account_name = DEV_ACCOUNT_NAME
self.account_key = DEV_ACCOUNT_KEY
@@ -70,15 +80,11 @@ def __init__(self, account_name=None, account_key=None, protocol='http'):
self.account_name = os.environ[AZURE_STORAGE_ACCOUNT]
if os.environ.has_key(AZURE_STORAGE_ACCESS_KEY):
self.account_key = os.environ[AZURE_STORAGE_ACCESS_KEY]
- else:
- self.account_name = account_name
- self.account_key = account_key
if not self.account_name or not self.account_key:
raise WindowsAzureError(azure._ERROR_STORAGE_MISSING_INFO)
- self.x_ms_version = X_MS_VERSION
- self._httpclient = _HTTPClient(service_instance=self, account_key=account_key, account_name=account_name, x_ms_version=self.x_ms_version, protocol=protocol)
+ self._httpclient = _HTTPClient(service_instance=self, account_key=self.account_key, account_name=self.account_name, protocol=protocol)
self._batchclient = None
self._filter = self._perform_request_worker
@@ -97,6 +103,16 @@ def new_filter(request):
res._filter = new_filter
return res
+ def set_proxy(self, host, port):
+ '''Sets the proxy server host and port for the HTTP CONNECT Tunnelling.'''
+ self._httpclient.set_proxy(host, port)
+
+ def _get_host(self):
+ if self.use_local_storage:
+ return self.dev_host
+ else:
+ return self.account_name + self.host_base
+
def _perform_request_worker(self, request):
return self._httpclient.perform_request(request)
@@ -111,6 +127,4 @@ def _perform_request(self, request):
except HTTPError as e:
_storage_error_handler(e)
- if not resp:
- return None
return resp
\ No newline at end of file
diff --git a/src/azure/storage/tableservice.py b/src/azure/storage/tableservice.py
index 722342756571..4240009385fc 100644
--- a/src/azure/storage/tableservice.py
+++ b/src/azure/storage/tableservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,16 +23,15 @@
convert_entity_to_xml, _convert_response_to_entity,
_convert_xml_to_entity, _sign_storage_table_request)
from azure.http.batchclient import _BatchClient
-from azure.http import HTTPRequest
+from azure.http import HTTPRequest, HTTP_RESPONSE_NO_CONTENT
from azure import (_validate_not_none, Feed,
_convert_response_to_feeds, _str_or_none, _int_or_none,
_get_request_body, _update_request_uri_query,
- _dont_fail_on_exist, _dont_fail_not_exist,
+ _dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError,
WindowsAzureError, _parse_response, _convert_class_to_xml,
_parse_response_for_dict, _parse_response_for_dict_prefix,
_parse_response_for_dict_filter,
_parse_enum_results_list, _update_request_uri_query_local_storage,
- _get_table_host, _get_queue_host, _get_blob_host,
_parse_simple_list, SERVICE_BUS_HOST_BASE, xml_escape)
class TableService(_StorageClient):
@@ -42,6 +41,9 @@ class TableService(_StorageClient):
account_key: your storage account key, required for all operations.
'''
+ def __init__(self, account_name = None, account_key = None, protocol = 'http', host_base = TABLE_SERVICE_HOST_BASE, dev_host = DEV_TABLE_HOST):
+ return super(TableService, self).__init__(account_name, account_key, protocol, host_base, dev_host)
+
def begin_batch(self):
if self._batchclient is None:
self._batchclient = _BatchClient(service_instance=self, account_key=self.account_key, account_name=self.account_name)
@@ -64,7 +66,7 @@ def get_table_service_properties(self):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
@@ -81,7 +83,7 @@ def set_table_service_properties(self, storage_service_properties):
_validate_not_none('storage_service_properties', storage_service_properties)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/?restype=service&comp=properties'
request.body = _get_request_body(_convert_class_to_xml(storage_service_properties))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -90,7 +92,7 @@ def set_table_service_properties(self, storage_service_properties):
return _parse_response_for_dict(response)
- def query_tables(self, table_name = None, top=None):
+ def query_tables(self, table_name = None, top=None, next_table_name=None):
'''
Returns a list of tables under the specified account.
@@ -99,13 +101,16 @@ def query_tables(self, table_name = None, top=None):
'''
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
if table_name is not None:
uri_part_table_name = "('" + table_name + "')"
else:
uri_part_table_name = ""
request.path = '/Tables' + uri_part_table_name + ''
- request.query = [('$top', _int_or_none(top))]
+ request.query = [
+ ('$top', _int_or_none(top)),
+ ('NextTableName', _str_or_none(next_table_name))
+ ]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
@@ -116,13 +121,15 @@ def create_table(self, table, fail_on_exist=False):
'''
Creates a new table in the storage account.
- table: name of the table to create.
+ table: name of the table to create. Table name may contain only alphanumeric characters
+ and cannot begin with a numeric character. It is case-insensitive and must be from
+ 3 to 63 characters long.
fail_on_exist: specify whether throw exception when table exists.
'''
_validate_not_none('table', table)
request = HTTPRequest()
request.method = 'POST'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/Tables'
request.body = _get_request_body(convert_table_to_xml(table))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
@@ -147,7 +154,7 @@ def delete_table(self, table_name, fail_not_exist=False):
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/Tables(\'' + str(table_name) + '\')'
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
@@ -176,7 +183,7 @@ def get_entity(self, table_name, partition_key, row_key, select=''):
_validate_not_none('select', select)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')?$select=' + str(select) + ''
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
@@ -184,7 +191,7 @@ def get_entity(self, table_name, partition_key, row_key, select=''):
return _convert_response_to_entity(response)
- def query_entities(self, table_name, filter=None, select=None, top=None):
+ def query_entities(self, table_name, filter=None, select=None, top=None, next_partition_key=None, next_row_key=None):
'''
Get entities in a table; includes the $filter and $select options.
@@ -196,12 +203,14 @@ def query_entities(self, table_name, filter=None, select=None, top=None):
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '()'
request.query = [
('$filter', _str_or_none(filter)),
('$select', _str_or_none(select)),
- ('$top', _int_or_none(top))
+ ('$top', _int_or_none(top)),
+ ('NextPartitionKey', _str_or_none(next_partition_key)),
+ ('NextRowKey', _str_or_none(next_row_key))
]
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
@@ -221,7 +230,7 @@ def insert_entity(self, table_name, entity, content_type='application/atom+xml')
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'POST'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + ''
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
@@ -229,6 +238,8 @@ def insert_entity(self, table_name, entity, content_type='application/atom+xml')
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
+ return _convert_response_to_entity(response)
+
def update_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
'''
Updates an existing entity in a table. The Update Entity operation replaces the entire
@@ -246,7 +257,7 @@ def update_entity(self, table_name, partition_key, row_key, entity, content_type
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
@@ -257,6 +268,8 @@ def update_entity(self, table_name, partition_key, row_key, entity, content_type
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
+ return _parse_response_for_dict_filter(response, filter=['etag'])
+
def merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
'''
Updates an existing entity by updating the entity's properties. This operation does
@@ -274,7 +287,7 @@ def merge_entity(self, table_name, partition_key, row_key, entity, content_type=
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
@@ -285,6 +298,8 @@ def merge_entity(self, table_name, partition_key, row_key, entity, content_type=
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
+ return _parse_response_for_dict_filter(response, filter=['etag'])
+
def delete_entity(self, table_name, partition_key, row_key, content_type='application/atom+xml', if_match='*'):
'''
Deletes an existing entity in a table.
@@ -302,7 +317,7 @@ def delete_entity(self, table_name, partition_key, row_key, content_type='applic
_validate_not_none('if_match', if_match)
request = HTTPRequest()
request.method = 'DELETE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [
('Content-Type', _str_or_none(content_type)),
@@ -330,7 +345,7 @@ def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, c
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'PUT'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
@@ -338,7 +353,9 @@ def insert_or_replace_entity(self, table_name, partition_key, row_key, entity, c
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
- def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml', if_match='*'):
+ return _parse_response_for_dict_filter(response, filter=['etag'])
+
+ def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, content_type='application/atom+xml'):
'''
Merges an existing entity or inserts a new entity if it does not exist in the table.
Because this operation can insert or update an entity, it is also known as an "upsert"
@@ -356,17 +373,16 @@ def insert_or_merge_entity(self, table_name, partition_key, row_key, entity, con
_validate_not_none('content_type', content_type)
request = HTTPRequest()
request.method = 'MERGE'
- request.host = _get_table_host(self.account_name, self.use_local_storage)
+ request.host = self._get_host()
request.path = '/' + str(table_name) + '(PartitionKey=\'' + str(partition_key) + '\',RowKey=\'' + str(row_key) + '\')'
- request.headers = [
- ('Content-Type', _str_or_none(content_type)),
- ('If-Match', _str_or_none(if_match))
- ]
+ request.headers = [('Content-Type', _str_or_none(content_type))]
request.body = _get_request_body(convert_entity_to_xml(entity))
request.path, request.query = _update_request_uri_query_local_storage(request, self.use_local_storage)
request.headers = _update_storage_table_header(request)
response = self._perform_request(request)
+ return _parse_response_for_dict_filter(response, filter=['etag'])
+
def _perform_request_worker(self, request):
auth = _sign_storage_table_request(request,
@@ -374,5 +390,3 @@ def _perform_request_worker(self, request):
self.account_key)
request.headers.append(('Authorization', auth))
return self._httpclient.perform_request(request)
-
-
diff --git a/src/build.bat b/src/build.bat
index 17d39bcde4af..b1aa2e403a09 100644
--- a/src/build.bat
+++ b/src/build.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/src/codegenerator/blob_input.txt b/src/codegenerator/blob_input.txt
index 550b5dec6939..55b52f616939 100644
--- a/src/codegenerator/blob_input.txt
+++ b/src/codegenerator/blob_input.txt
@@ -21,7 +21,8 @@ marker: Optional. A string value that identifies the portion of the list to be r
with the next list operation.
maxresults: Optional. Specifies the maximum number of containers to return.
include: Optional. Include this parameter to specify that the container's metadata be
- returned as part of the response body.
+ returned as part of the response body. Set this parameter to the string 'metadata' to
+ have the container's metadata returned.
[return]
ContainerEnumResults
[url]
@@ -243,7 +244,7 @@ container_name: the name of container to get the blob
blob_name: the name of blob
x_ms_range: Optional. Return only the bytes of the blob in the specified range.
[return]
-str
+BlobResult
[url]
GET http://.blob.core.windows.net//
[query]
@@ -320,6 +321,8 @@ x_ms_lease_id: Optional. If this header is specified, the operation will be perf
1. The blob's lease is currently active
2. The lease ID specified in the request matches that of the blob.
[return]
+dict
+filter=['x-ms-snapshot', 'etag', 'last-modified']
[url]
PUT http://.blob.core.windows.net//?comp=snapshot
[query]
diff --git a/src/codegenerator/codegenerator.py b/src/codegenerator/codegenerator.py
index 587df94d6dae..e6d5c7b04687 100644
--- a/src/codegenerator/codegenerator.py
+++ b/src/codegenerator/codegenerator.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -40,8 +40,8 @@ def to_legalname(name):
if ch.isupper():
legalname += '_'
legalname += ch
- legalname = legalname.replace('__', '_').replace('_m_d5', '_md5')
- return legalname.lower()
+ legalname = legalname.replace('__', '_').lower().replace('_m_d5', '_md5')
+ return legalname
def normalize_xml(xmlstr):
if xmlstr:
@@ -99,7 +99,7 @@ def output_import(output_file, class_name):
output_str += 'from azure.storage import *\n'
output_str += 'from azure.storage.storageclient import _StorageClient\n'
if 'Blob' in class_name:
- output_str += 'from azure.storage import (_update_storage_blob_header,\n'
+ output_str += 'from azure.storage import (_update_storage_blob_header, _create_blob_result,\n'
output_str += indent*8 + 'convert_block_list_to_xml, convert_response_to_block_list) \n'
elif 'Queue' in class_name:
output_str += 'from azure.storage import (_update_storage_queue_header)\n'
@@ -115,7 +115,7 @@ def output_import(output_file, class_name):
output_str += 'from azure import (_validate_not_none, Feed,\n'
output_str += indent*8 + '_convert_response_to_feeds, _str_or_none, _int_or_none,\n'
output_str += indent*8 + '_get_request_body, _update_request_uri_query, \n'
- output_str += indent*8 + '_dont_fail_on_exist, _dont_fail_not_exist, \n'
+ output_str += indent*8 + '_dont_fail_on_exist, _dont_fail_not_exist, WindowsAzureConflictError, \n'
output_str += indent*8 + 'WindowsAzureError, _parse_response, _convert_class_to_xml, \n'
output_str += indent*8 + '_parse_response_for_dict, _parse_response_for_dict_prefix, \n'
output_str += indent*8 + '_parse_response_for_dict_filter, \n'
@@ -260,7 +260,7 @@ def output_list(list_name, request_list, validate_conversions):
return output_list_str
-def output_method_body(return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param):
+def output_method_body(method_name, return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param):
indent = ' '
output_body = ''.join([indent*2, 'request = HTTPRequest()\n'])
@@ -341,16 +341,32 @@ def output_method_body(return_type, method_params, uri_param, req_protocol, req_
for name, value in method_params:
if 'fail_on_exist' in name:
- output_body += indent*2 + 'if not ' + name + ':\n'
- output_body += indent*3 + 'try:\n'
- output_body += ''.join([indent*4, 'self._perform_request(request)\n'])
- output_body += ''.join([indent*4, 'return True\n'])
- output_body += indent*3 + 'except WindowsAzureError as e:\n'
- output_body += indent*4 + '_dont_fail_on_exist(e)\n'
- output_body += indent*4 + 'return False\n'
- output_body += indent*2 + 'else:\n'
- output_body += ''.join([indent*3, 'self._perform_request(request)\n'])
- output_body += ''.join([indent*3, 'return True\n\n'])
+ if method_name == 'create_queue' and 'queue.core' in req_host: #QueueService create_queue
+ output_body += indent*2 + 'if not ' + name + ':\n'
+ output_body += indent*3 + 'try:\n'
+ output_body += ''.join([indent*4, 'response = self._perform_request(request)\n'])
+ output_body += ''.join([indent*4, 'if response.status == 204:\n'])
+ output_body += ''.join([indent*5, 'return False\n'])
+ output_body += ''.join([indent*4, 'return True\n'])
+ output_body += indent*3 + 'except WindowsAzureError as e:\n'
+ output_body += indent*4 + '_dont_fail_on_exist(e)\n'
+ output_body += indent*4 + 'return False\n'
+ output_body += indent*2 + 'else:\n'
+ output_body += ''.join([indent*3, 'response = self._perform_request(request)\n'])
+ output_body += ''.join([indent*3, 'if response.status == 204:\n'])
+ output_body += ''.join([indent*4, 'raise WindowsAzureConflictError(azure._ERROR_CONFLICT)\n'])
+ output_body += ''.join([indent*3, 'return True\n\n'])
+ else:
+ output_body += indent*2 + 'if not ' + name + ':\n'
+ output_body += indent*3 + 'try:\n'
+ output_body += ''.join([indent*4, 'self._perform_request(request)\n'])
+ output_body += ''.join([indent*4, 'return True\n'])
+ output_body += indent*3 + 'except WindowsAzureError as e:\n'
+ output_body += indent*4 + '_dont_fail_on_exist(e)\n'
+ output_body += indent*4 + 'return False\n'
+ output_body += indent*2 + 'else:\n'
+ output_body += ''.join([indent*3, 'self._perform_request(request)\n'])
+ output_body += ''.join([indent*3, 'return True\n\n'])
break
elif 'fail_not_exist' in name:
output_body += indent*2 + 'if not ' + name + ':\n'
@@ -383,13 +399,15 @@ def output_method_body(return_type, method_params, uri_param, req_protocol, req_
elif return_type == 'PageList':
output_body += indent*2 + 'return _parse_simple_list(response, PageList, PageRange, "page_ranges")'
else:
- if return_type == 'Message':
+ if return_type == 'BlobResult':
+ output_body += indent*2 + 'return _create_blob_result(response)\n\n'
+ elif return_type == 'Message':
output_body += indent*2 + 'return _create_message(response, self)\n\n'
elif return_type == 'str':
output_body += indent*2 + 'return response.body\n\n'
elif return_type == 'BlobBlockList':
output_body += indent*2 + 'return convert_response_to_block_list(response)\n\n'
- elif 'Feed' in return_type:
+ elif 'Feed' in return_type:
for name in ['table', 'entity', 'topic', 'subscription', 'queue', 'rule']:
if name +'\'),' in return_type:
convert_func = '_convert_xml_to_' + name
@@ -412,7 +430,7 @@ def output_method(output_file, method_name, method_params, method_comment, retur
output_str += output_method_def(method_name, method_params, uri_param, req_param, req_query, req_header)
output_str += output_method_comments(method_comment, req_param, req_query, req_header)
output_str += output_method_validates(uri_param, req_param, req_query, req_header)
- output_str += output_method_body(return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
+ output_str += output_method_body(method_name, return_type, method_params, uri_param, req_protocol, req_host, host_param, req_method, req_uri, req_query, req_header, req_body, req_param)
output_file.write(output_str)
@@ -686,20 +704,32 @@ def auto_codegen(source_filename, output_filename='output.py'):
auto_codegen('queue_input.txt', '../azure/storage/queueservice.py')
auto_codegen('servicebus_input.txt', '../azure/servicebus/servicebusservice.py')
- def add_license(license_file_name, output_file_name):
- license_file = open(license_file_name, 'r')
+ def add_license(license_str, output_file_name):
output_file = open(output_file_name, 'r')
content = output_file.read()
- license_txt = license_file.read()
- license_file.close()
output_file.close()
output_file = open(output_file_name, 'w')
- output_file.write(license_txt)
+ output_file.write(license_str)
output_file.write(content)
output_file.close()
+ license_str = '''#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+'''
- add_license('license.txt', '../azure/storage/blobservice.py')
- add_license('license.txt', '../azure/storage/tableservice.py')
- add_license('license.txt', '../azure/storage/queueservice.py')
- add_license('license.txt', '../azure/servicebus/servicebusservice.py')
\ No newline at end of file
+ add_license(license_str, '../azure/storage/blobservice.py')
+ add_license(license_str, '../azure/storage/tableservice.py')
+ add_license(license_str, '../azure/storage/queueservice.py')
+ add_license(license_str, '../azure/servicebus/servicebusservice.py')
\ No newline at end of file
diff --git a/src/codegenerator/servicebus_input.txt b/src/codegenerator/servicebus_input.txt
index 226fbf12b640..8aac7ac4edd5 100644
--- a/src/codegenerator/servicebus_input.txt
+++ b/src/codegenerator/servicebus_input.txt
@@ -470,8 +470,6 @@ def _perform_request(self, request):
except HTTPError as e:
return _service_bus_error_handler(e)
- if not resp:
- return None
return resp
[end]
diff --git a/src/codegenerator/table_input.txt b/src/codegenerator/table_input.txt
index 5cb5c124e67c..be01432922c8 100644
--- a/src/codegenerator/table_input.txt
+++ b/src/codegenerator/table_input.txt
@@ -46,13 +46,16 @@ top: the maximum number of tables to return
GET http://.table.core.windows.net/Tables
[query]
$top=
+NextTableName=
[method]
create_table
[comment]
Creates a new table in the storage account.
-table: name of the table to create.
+table: name of the table to create. Table name may contain only alphanumeric characters
+ and cannot begin with a numeric character. It is case-insensitive and must be from
+ 3 to 63 characters long.
fail_on_exist: specify whether throw exception when table exists.
[params]
fail_on_exist=False
@@ -105,6 +108,8 @@ GET http://.table.core.windows.net/()
$filter=
$select=
$top=
+NextPartitionKey=
+NextRowKey=
[method]
insert_entity
@@ -113,6 +118,7 @@ Inserts a new entity into a table.
entity: Required. The entity object to insert. Could be a dict format or entity object.
[return]
+Feed('entity')
[url]
POST http://.table.core.windows.net/
[requestheader]
@@ -130,6 +136,8 @@ entity: Required. The entity object to insert. Could be a dict format or entity
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
+dict
+filter=['etag']
[url]
PUT http://.table.core.windows.net/(PartitionKey=\'\',RowKey=\'\')
[requestheader]
@@ -148,6 +156,8 @@ entity: Required. The entity object to insert. Can be a dict format or entity ob
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
+dict
+filter=['etag']
[url]
MERGE http://.table.core.windows.net/(PartitionKey=\'\',RowKey=\'\')
[requestheader]
@@ -183,6 +193,8 @@ entity: Required. The entity object to insert. Could be a dict format or entity
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
+dict
+filter=['etag']
[url]
PUT http://.table.core.windows.net/(PartitionKey=\'\',RowKey=\'\')
[requestheader]
@@ -201,11 +213,12 @@ entity: Required. The entity object to insert. Could be a dict format or entity
partition_key: PartitionKey of the entity.
row_key: RowKey of the entity.
[return]
+dict
+filter=['etag']
[url]
MERGE http://.table.core.windows.net/(PartitionKey=\'\',RowKey=\'\')
[requestheader]
Content-Type=application/atom+xml;required:application/atom+xml|#this is required and has to be set to application/atom+xml
-If-Match=*
[requestbody]
feed:entity;required:feed
diff --git a/src/install.bat b/src/install.bat
index f0a169369c8b..be9180d5b51f 100644
--- a/src/install.bat
+++ b/src/install.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/src/installfrompip.bat b/src/installfrompip.bat
index 5b5fbfb091d6..c6e711ce418d 100644
--- a/src/installfrompip.bat
+++ b/src/installfrompip.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/src/setup.py b/src/setup.py
index e40709494cb6..3f38f1e8c237 100644
--- a/src/setup.py
+++ b/src/setup.py
@@ -1,26 +1,29 @@
#!/usr/bin/env python
-#------------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation.
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
#
-# This source code is subject to terms and conditions of the Apache License,
-# Version 2.0. A copy of the license can be found in the License.html file at
-# the root of this distribution. If you cannot locate the Apache License,
-# Version 2.0, please send an email to vspython@microsoft.com. By using this
-# source code in any fashion, you are agreeing to be bound by the terms of the
-# Apache License, Version 2.0.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# You must not remove this notice, or any other, from this software.
-#------------------------------------------------------------------------------
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
from distutils.core import setup
setup(name='azure',
- version='0.2.3',
+ version='0.6',
description='Windows Azure client APIs',
url='https://github.com/WindowsAzure/azure-sdk-for-python',
packages=['azure',
'azure.http',
'azure.servicebus',
- 'azure.storage']
+ 'azure.storage',
+ 'azure.servicemanagement']
)
diff --git a/src/upload.bat b/src/upload.bat
index 3e953e29013a..cc12d7da0c0d 100644
--- a/src/upload.bat
+++ b/src/upload.bat
@@ -1,15 +1,17 @@
@echo OFF
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls
diff --git a/test/azuretest.pyproj b/test/azuretest.pyproj
index 2fe4cb6bcb84..507971137014 100644
--- a/test/azuretest.pyproj
+++ b/test/azuretest.pyproj
@@ -6,7 +6,7 @@
{c0742a2d-4862-40e4-8a28-036eecdbc614}
- azuretest\test_tableservice.py
+ azuretest\test_servicemanagementservice.py
.
.
azuretest
@@ -19,8 +19,13 @@
localhost/1/Core/
X86
False
- 2af0f10d-7135-4994-9156-5d01c9c11b7e
+ 9a7a9026-48c1-4688-9d5d-e5699d47d074
2.7
+ ..\src\;..\test\
+ $/TCWCS/Python/Main/Open_Source/Incubation/windowsazure/test
+ {4CA58AB2-18FA-4F8D-95D4-32DDF27D184C}
+ http://tcvstf:8080/tfs/tc
+ .
true
@@ -32,16 +37,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/azuretest/__init__.py b/test/azuretest/__init__.py
index 330ef2588479..356b5e002a1d 100644
--- a/test/azuretest/__init__.py
+++ b/test/azuretest/__init__.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/test/azuretest/clean.py b/test/azuretest/clean.py
new file mode 100644
index 000000000000..164fb886bc77
--- /dev/null
+++ b/test/azuretest/clean.py
@@ -0,0 +1,78 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+from azure import *
+from azure.storage import *
+from azure.servicebus import *
+from azuretest.util import *
+
+print('WARNING!!!')
+print('')
+print('This program cleans the storage account and the service namespace specified')
+print('by the unit test credentials file (windowsazurecredentials.json) located in')
+print('your home directory.')
+print('')
+print('You should not run this program while tests are running as this will')
+print('interfere with the tests.')
+print('')
+print('The following will be deleted from the storage account:')
+print(' - All containers')
+print(' - All tables')
+print(' - All queues')
+print('')
+print('The following will be deleted from the service namespace:')
+print(' - All queues')
+print(' - All topics')
+print('')
+print('Enter YES to proceed, or anything else to cancel')
+print('')
+
+user_response = raw_input('>')
+if user_response == 'YES':
+ print('Cleaning storage account...')
+
+ bc = BlobService(credentials.getStorageServicesName(),
+ credentials.getStorageServicesKey())
+
+ ts = TableService(credentials.getStorageServicesName(),
+ credentials.getStorageServicesKey())
+
+ qs = QueueService(credentials.getStorageServicesName(),
+ credentials.getStorageServicesKey())
+
+ for container in bc.list_containers():
+ bc.delete_container(container.name)
+
+ for table in ts.query_tables():
+ ts.delete_table(table.name)
+
+ for queue in qs.list_queues():
+ qs.delete_queue(queue.name)
+
+ print('Cleaning service namespace...')
+
+ sbs = ServiceBusService(credentials.getServiceBusNamespace(),
+ credentials.getServiceBusKey(),
+ 'owner')
+
+ for queue in sbs.list_queues():
+ sbs.delete_queue(queue.name)
+
+ for topic in sbs.list_topics():
+ sbs.delete_topic(topic.name)
+
+ print('Done.')
+else:
+ print('Canceled.')
diff --git a/test/azuretest/data/WindowsAzure1.cspkg b/test/azuretest/data/WindowsAzure1.cspkg
new file mode 100644
index 000000000000..385a79d1c417
Binary files /dev/null and b/test/azuretest/data/WindowsAzure1.cspkg differ
diff --git a/test/azuretest/data/test.vhd b/test/azuretest/data/test.vhd
new file mode 100644
index 000000000000..28b20ae4f1bc
Binary files /dev/null and b/test/azuretest/data/test.vhd differ
diff --git a/test/azuretest/doctest_blobservice.py b/test/azuretest/doctest_blobservice.py
new file mode 100644
index 000000000000..3645ff3d924c
--- /dev/null
+++ b/test/azuretest/doctest_blobservice.py
@@ -0,0 +1,65 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+"""
+How to: Create a Container
+--------------------------
+>>> from azure.storage import *
+>>> blob_service = BlobService(name, key)
+>>> blob_service.create_container('mycontainer')
+True
+
+>>> blob_service.create_container('mycontainer2', x_ms_blob_public_access='container')
+True
+
+>>> blob_service.set_container_acl('mycontainer', x_ms_blob_public_access='container')
+
+How to: Upload a Blob into a Container
+--------------------------------------
+>>> myblob = 'hello blob'
+>>> blob_service.put_blob('mycontainer', 'myblob', myblob, x_ms_blob_type='BlockBlob')
+
+How to: List the Blobs in a Container
+-------------------------------------
+>>> blobs = blob_service.list_blobs('mycontainer')
+>>> for blob in blobs:
+... print(blob.name)
+myblob
+
+How to: Download Blobs
+----------------------
+>>> blob = blob_service.get_blob('mycontainer', 'myblob')
+>>> print(blob)
+hello blob
+
+How to: Delete a Blob
+---------------------
+>>> blob_service.delete_blob('mycontainer', 'myblob')
+
+>>> blob_service.delete_container('mycontainer')
+True
+
+>>> blob_service.delete_container('mycontainer2')
+True
+
+"""
+from azuretest.util import *
+
+name = credentials.getStorageServicesName()
+key = credentials.getStorageServicesKey()
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/test/azuretest/doctest_queueservice.py b/test/azuretest/doctest_queueservice.py
new file mode 100644
index 000000000000..ae0730bd6b47
--- /dev/null
+++ b/test/azuretest/doctest_queueservice.py
@@ -0,0 +1,81 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+"""
+How To: Create a Queue
+----------------------
+>>> from azure.storage import *
+>>> queue_service = QueueService(name, key)
+>>> queue_service.create_queue('taskqueue')
+True
+
+How To: Insert a Message into a Queue
+-------------------------------------
+>>> queue_service.put_message('taskqueue', 'Hello World')
+
+How To: Peek at the Next Message
+--------------------------------
+>>> messages = queue_service.peek_messages('taskqueue')
+>>> for message in messages:
+... print(message.message_text)
+...
+Hello World
+
+How To: Dequeue the Next Message
+--------------------------------
+>>> messages = queue_service.get_messages('taskqueue')
+>>> for message in messages:
+... print(message.message_text)
+... queue_service.delete_message('taskqueue', message.message_id, message.pop_receipt)
+Hello World
+
+How To: Change the Contents of a Queued Message
+-----------------------------------------------
+>>> queue_service.put_message('taskqueue', 'Hello World')
+>>> messages = queue_service.get_messages('taskqueue')
+>>> for message in messages:
+... res = queue_service.update_message('taskqueue', message.message_id, 'Hello World Again', message.pop_receipt, 0)
+
+How To: Additional Options for Dequeuing Messages
+-------------------------------------------------
+>>> queue_service.put_message('taskqueue', 'Hello World')
+>>> messages = queue_service.get_messages('taskqueue', numofmessages=16, visibilitytimeout=5*60)
+>>> for message in messages:
+... print(message.message_text)
+... queue_service.delete_message('taskqueue', message.message_id, message.pop_receipt)
+Hello World Again
+Hello World
+
+How To: Get the Queue Length
+----------------------------
+>>> queue_metadata = queue_service.get_queue_metadata('taskqueue')
+>>> count = queue_metadata['x-ms-approximate-messages-count']
+>>> count
+u'0'
+
+How To: Delete a Queue
+----------------------
+>>> queue_service.delete_queue('taskqueue')
+True
+
+"""
+from azuretest.util import *
+
+name = credentials.getStorageServicesName()
+key = credentials.getStorageServicesKey()
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/test/azuretest/doctest_servicebusservicequeue.py b/test/azuretest/doctest_servicebusservicequeue.py
new file mode 100644
index 000000000000..408beaae6cd3
--- /dev/null
+++ b/test/azuretest/doctest_servicebusservicequeue.py
@@ -0,0 +1,64 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+"""
+How To: Create a Queue
+----------------------
+>>> from azure.servicebus import *
+>>> bus_service = ServiceBusService(ns, key, 'owner')
+>>> bus_service.create_queue('taskqueue')
+True
+
+>>> queue_options = Queue()
+>>> queue_options.max_size_in_megabytes = '5120'
+>>> queue_options.default_message_time_to_live = 'PT1M'
+>>> bus_service.create_queue('taskqueue2', queue_options)
+True
+
+How to Send Messages to a Queue
+-------------------------------
+>>> msg = Message('Test Message')
+>>> bus_service.send_queue_message('taskqueue', msg)
+
+How to Receive Messages from a Queue
+------------------------------------
+>>> msg = bus_service.receive_queue_message('taskqueue')
+>>> print(msg.body)
+Test Message
+
+>>> msg = Message('Test Message')
+>>> bus_service.send_queue_message('taskqueue', msg)
+
+>>> msg = bus_service.receive_queue_message('taskqueue', peek_lock=True)
+>>> print(msg.body)
+Test Message
+>>> msg.delete()
+
+
+>>> bus_service.delete_queue('taskqueue')
+True
+
+>>> bus_service.delete_queue('taskqueue2')
+True
+
+"""
+from azuretest.util import *
+
+ns = credentials.getServiceBusNamespace()
+key = credentials.getServiceBusKey()
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/test/azuretest/doctest_servicebusservicetopic.py b/test/azuretest/doctest_servicebusservicetopic.py
new file mode 100644
index 000000000000..ad9b1908979b
--- /dev/null
+++ b/test/azuretest/doctest_servicebusservicetopic.py
@@ -0,0 +1,95 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+"""
+How to Create a Topic
+---------------------
+>>> from azure.servicebus import *
+>>> bus_service = ServiceBusService(ns, key, 'owner')
+>>> bus_service.create_topic('mytopic')
+True
+
+>>> topic_options = Topic()
+>>> topic_options.max_size_in_megabytes = '5120'
+>>> topic_options.default_message_time_to_live = 'PT1M'
+>>> bus_service.create_topic('mytopic2', topic_options)
+True
+
+How to Create Subscriptions
+---------------------------
+>>> bus_service.create_subscription('mytopic', 'AllMessages')
+True
+
+>>> bus_service.create_subscription('mytopic', 'HighMessages')
+True
+
+>>> rule = Rule()
+>>> rule.filter_type = 'SqlFilter'
+>>> rule.filter_expression = 'messagenumber > 3'
+>>> bus_service.create_rule('mytopic', 'HighMessages', 'HighMessageFilter', rule)
+True
+
+>>> bus_service.delete_rule('mytopic', 'HighMessages', DEFAULT_RULE_NAME)
+True
+
+>>> bus_service.create_subscription('mytopic', 'LowMessages')
+True
+
+>>> rule = Rule()
+>>> rule.filter_type = 'SqlFilter'
+>>> rule.filter_expression = 'messagenumber <= 3'
+>>> bus_service.create_rule('mytopic', 'LowMessages', 'LowMessageFilter', rule)
+True
+
+>>> bus_service.delete_rule('mytopic', 'LowMessages', DEFAULT_RULE_NAME)
+True
+
+How to Send Messages to a Topic
+-------------------------------
+>>> for i in range(5):
+... msg = Message('Msg ' + str(i), custom_properties={'messagenumber':i})
+... bus_service.send_topic_message('mytopic', msg)
+
+How to Receive Messages from a Subscription
+-------------------------------------------
+>>> msg = bus_service.receive_subscription_message('mytopic', 'LowMessages')
+>>> print(msg.body)
+Msg 0
+
+>>> msg = bus_service.receive_subscription_message('mytopic', 'LowMessages', peek_lock=True)
+>>> print(msg.body)
+Msg 1
+>>> msg.delete()
+
+How to Delete Topics and Subscriptions
+--------------------------------------
+>>> bus_service.delete_subscription('mytopic', 'HighMessages')
+True
+
+>>> bus_service.delete_topic('mytopic')
+True
+
+>>> bus_service.delete_topic('mytopic2')
+True
+
+"""
+from azuretest.util import *
+
+ns = credentials.getServiceBusNamespace()
+key = credentials.getServiceBusKey()
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/test/azuretest/doctest_tableservice.py b/test/azuretest/doctest_tableservice.py
new file mode 100644
index 000000000000..d98e9866b41a
--- /dev/null
+++ b/test/azuretest/doctest_tableservice.py
@@ -0,0 +1,116 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+"""
+How To: Create a Table
+----------------------
+>>> from azure.storage import *
+>>> table_service = TableService(name, key)
+>>> table_service.create_table('tasktable')
+True
+
+How to Add an Entity to a Table
+-------------------------------
+>>> task = {'PartitionKey': 'tasksSeattle', 'RowKey': '1', 'description' : 'Take out the trash', 'priority' : 200}
+>>> entity = table_service.insert_entity('tasktable', task)
+
+>>> task = Entity()
+>>> task.PartitionKey = 'tasksSeattle'
+>>> task.RowKey = '2'
+>>> task.description = 'Wash the car'
+>>> task.priority = 100
+>>> entity = table_service.insert_entity('tasktable', task)
+
+How to Update an Entity
+-----------------------
+>>> task = {'description' : 'Take out the garbage', 'priority' : 250}
+>>> entity = table_service.update_entity('tasktable', 'tasksSeattle', '1', task)
+
+>>> task = {'description' : 'Take out the garbage again', 'priority' : 250}
+>>> entity = table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '1', task)
+
+>>> task = {'description' : 'Buy detergent', 'priority' : 300}
+>>> entity = table_service.insert_or_replace_entity('tasktable', 'tasksSeattle', '3', task)
+
+
+How to Change a Group of Entities
+---------------------------------
+>>> task10 = {'PartitionKey': 'tasksSeattle', 'RowKey': '10', 'description' : 'Go grocery shopping', 'priority' : 400}
+>>> task11 = {'PartitionKey': 'tasksSeattle', 'RowKey': '11', 'description' : 'Clean the bathroom', 'priority' : 100}
+>>> table_service.begin_batch()
+>>> table_service.insert_entity('tasktable', task10)
+>>> table_service.insert_entity('tasktable', task11)
+>>> table_service.commit_batch()
+
+How to Query for an Entity
+--------------------------
+>>> task = table_service.get_entity('tasktable', 'tasksSeattle', '1')
+>>> print(task.description)
+Take out the garbage again
+>>> print(task.priority)
+250
+
+>>> task = table_service.get_entity('tasktable', 'tasksSeattle', '10')
+>>> print(task.description)
+Go grocery shopping
+>>> print(task.priority)
+400
+
+How to Query a Set of Entities
+------------------------------
+>>> tasks = table_service.query_entities('tasktable', "PartitionKey eq 'tasksSeattle'")
+>>> for task in tasks:
+... print(task.description)
+... print(task.priority)
+Take out the garbage again
+250
+Go grocery shopping
+400
+Clean the bathroom
+100
+Wash the car
+100
+Buy detergent
+300
+
+How to Query a Subset of Entity Properties
+------------------------------------------
+>>> tasks = table_service.query_entities('tasktable', "PartitionKey eq 'tasksSeattle'", 'description')
+>>> for task in tasks:
+... print(task.description)
+Take out the garbage again
+Go grocery shopping
+Clean the bathroom
+Wash the car
+Buy detergent
+
+How to Delete an Entity
+-----------------------
+>>> table_service.delete_entity('tasktable', 'tasksSeattle', '1')
+
+How to Delete a Table
+---------------------
+>>> table_service.delete_table('tasktable')
+True
+
+"""
+from azuretest.util import *
+
+name = credentials.getStorageServicesName()
+key = credentials.getStorageServicesKey()
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/test/azuretest/test_blobservice.py b/test/azuretest/test_blobservice.py
index 0d37c9747edf..087068c67c5b 100644
--- a/test/azuretest/test_blobservice.py
+++ b/test/azuretest/test_blobservice.py
@@ -1,5 +1,5 @@
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,28 +14,32 @@
#--------------------------------------------------------------------------
from azure.storage.blobservice import *
-from azure.storage import Metrics, BlockList
+from azure.storage import Metrics
+from azure.storage.storageclient import AZURE_STORAGE_ACCESS_KEY, AZURE_STORAGE_ACCOUNT, EMULATED, DEV_ACCOUNT_NAME, DEV_ACCOUNT_KEY
from azure import WindowsAzureError
from azuretest.util import *
from azure.http import HTTPRequest, HTTPResponse
import unittest
-import time
#------------------------------------------------------------------------------
-class BlobServiceTest(unittest.TestCase):
+class BlobServiceTest(AzureTestCase):
def setUp(self):
self.bc = BlobService(account_name=credentials.getStorageServicesName(),
account_key=credentials.getStorageServicesKey())
- # TODO: it may be overkill to use the machine name from
- # getUniqueTestRunID, current time may be unique enough
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.bc.set_proxy(proxy_host, proxy_port)
+
__uid = getUniqueTestRunID()
container_base_name = u'mytestcontainer%s' % (__uid)
self.container_name = getUniqueNameBasedOnCurrentTime(container_base_name)
+ self.additional_container_names = []
def tearDown(self):
self.cleanup()
@@ -46,26 +50,12 @@ def cleanup(self):
self.bc.delete_container(self.container_name)
except: pass
- #--Helpers-----------------------------------------------------------------
-
- # TODO: move this function out of here so other tests can use them
- # TODO: find out how to import/use safe_repr instead repr
- def assertNamedItemInContainer(self, container, item_name, msg=None):
- for item in container:
- if item.name == item_name:
- return
-
- standardMsg = '%s not found in %s' % (repr(item_name), repr(container))
- self.fail(self._formatMessage(msg, standardMsg))
-
- # TODO: move this function out of here so other tests can use them
- # TODO: find out how to import/use safe_repr instead repr
- def assertNamedItemNotInContainer(self, container, item_name, msg=None):
- for item in container:
- if item.name == item_name:
- standardMsg = '%s unexpectedly found in %s' % (repr(item_name), repr(container))
- self.fail(self._formatMessage(msg, standardMsg))
+ for name in self.additional_container_names:
+ try:
+ self.bc.delete_container(name)
+ except: pass
+ #--Helpers-----------------------------------------------------------------
def _create_container(self, container_name):
self.bc.create_container(container_name, None, None, True)
@@ -79,6 +69,92 @@ def _create_container_and_page_blob(self, container_name, blob_name, content_len
resp = self.bc.put_blob(self.container_name, blob_name, '', 'PageBlob', x_ms_blob_content_length=str(content_length))
self.assertIsNone(resp)
+ #--Test cases for blob service --------------------------------------------
+ def test_create_blob_service_missing_arguments(self):
+ # Arrange
+ if os.environ.has_key(AZURE_STORAGE_ACCOUNT):
+ del os.environ[AZURE_STORAGE_ACCOUNT]
+ if os.environ.has_key(AZURE_STORAGE_ACCESS_KEY):
+ del os.environ[AZURE_STORAGE_ACCESS_KEY]
+ if os.environ.has_key(EMULATED):
+ del os.environ[EMULATED]
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ bs = BlobService()
+
+ # Assert
+
+ def test_create_blob_service_env_variables(self):
+ # Arrange
+ os.environ[AZURE_STORAGE_ACCOUNT] = credentials.getStorageServicesName()
+ os.environ[AZURE_STORAGE_ACCESS_KEY] = credentials.getStorageServicesKey()
+
+ # Act
+ bs = BlobService()
+
+ if os.environ.has_key(AZURE_STORAGE_ACCOUNT):
+ del os.environ[AZURE_STORAGE_ACCOUNT]
+ if os.environ.has_key(AZURE_STORAGE_ACCESS_KEY):
+ del os.environ[AZURE_STORAGE_ACCESS_KEY]
+
+ # Assert
+ self.assertIsNotNone(bs)
+ self.assertEquals(bs.account_name, credentials.getStorageServicesName())
+ self.assertEquals(bs.account_key, credentials.getStorageServicesKey())
+ self.assertEquals(bs.is_emulated, False)
+
+ def test_create_blob_service_emulated_true(self):
+ # Arrange
+ os.environ[EMULATED] = 'true'
+
+ # Act
+ bs = BlobService()
+
+ if os.environ.has_key(EMULATED):
+ del os.environ[EMULATED]
+
+ # Assert
+ self.assertIsNotNone(bs)
+ self.assertEquals(bs.account_name, DEV_ACCOUNT_NAME)
+ self.assertEquals(bs.account_key, DEV_ACCOUNT_KEY)
+ self.assertEquals(bs.is_emulated, True)
+
+ def test_create_blob_service_emulated_false(self):
+ # Arrange
+ os.environ[EMULATED] = 'false'
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ bs = BlobService()
+
+ if os.environ.has_key(EMULATED):
+ del os.environ[EMULATED]
+
+ # Assert
+
+ def test_create_blob_service_emulated_false_env_variables(self):
+ # Arrange
+ os.environ[EMULATED] = 'false'
+ os.environ[AZURE_STORAGE_ACCOUNT] = credentials.getStorageServicesName()
+ os.environ[AZURE_STORAGE_ACCESS_KEY] = credentials.getStorageServicesKey()
+
+ # Act
+ bs = BlobService()
+
+ if os.environ.has_key(EMULATED):
+ del os.environ[EMULATED]
+ if os.environ.has_key(AZURE_STORAGE_ACCOUNT):
+ del os.environ[AZURE_STORAGE_ACCOUNT]
+ if os.environ.has_key(AZURE_STORAGE_ACCESS_KEY):
+ del os.environ[AZURE_STORAGE_ACCESS_KEY]
+
+ # Assert
+ self.assertIsNotNone(bs)
+ self.assertEquals(bs.account_name, credentials.getStorageServicesName())
+ self.assertEquals(bs.account_key, credentials.getStorageServicesKey())
+ self.assertEquals(bs.is_emulated, False)
+
#--Test cases for containers -----------------------------------------
def test_create_container_no_options(self):
# Arrange
@@ -98,6 +174,17 @@ def test_create_container_no_options_fail_on_exist(self):
# Assert
self.assertTrue(created)
+ def test_create_container_with_already_existing_container(self):
+ # Arrange
+
+ # Act
+ created1 = self.bc.create_container(self.container_name)
+ created2 = self.bc.create_container(self.container_name)
+
+ # Assert
+ self.assertTrue(created1)
+ self.assertFalse(created2)
+
def test_create_container_with_already_existing_container_fail_on_exist(self):
# Arrange
@@ -155,7 +242,62 @@ def test_list_containers_no_options(self):
# Assert
self.assertIsNotNone(containers)
+ self.assertGreaterEqual(len(containers), 1)
+ self.assertIsNotNone(containers[0])
+ self.assertNamedItemInContainer(containers, self.container_name)
+
+ def test_list_containers_with_prefix(self):
+ # Arrange
+ self.bc.create_container(self.container_name)
+
+ # Act
+ containers = self.bc.list_containers(self.container_name)
+
+ # Assert
+ self.assertIsNotNone(containers)
+ self.assertEqual(len(containers), 1)
+ self.assertIsNotNone(containers[0])
+ self.assertEqual(containers[0].name, self.container_name)
+        self.assertIsNone(containers[0].metadata)
+
+ def test_list_containers_with_include_metadata(self):
+ # Arrange
+ self.bc.create_container(self.container_name)
+ resp = self.bc.set_container_metadata(self.container_name, {'hello':'world', 'bar':'43'})
+
+ # Act
+ containers = self.bc.list_containers(self.container_name, None, None, 'metadata')
+
+ # Assert
+ self.assertIsNotNone(containers)
+ self.assertGreaterEqual(len(containers), 1)
+ self.assertIsNotNone(containers[0])
self.assertNamedItemInContainer(containers, self.container_name)
+ self.assertEqual(containers[0].metadata['hello'], 'world')
+ self.assertEqual(containers[0].metadata['bar'], '43')
+
+ def test_list_containers_with_maxresults_and_marker(self):
+ # Arrange
+ self.additional_container_names = [self.container_name + 'a',
+ self.container_name + 'b',
+ self.container_name + 'c',
+ self.container_name + 'd']
+ for name in self.additional_container_names:
+ self.bc.create_container(name)
+
+ # Act
+ containers1 = self.bc.list_containers(self.container_name, None, 2)
+ containers2 = self.bc.list_containers(self.container_name, containers1.next_marker, 2)
+
+ # Assert
+ self.assertIsNotNone(containers1)
+ self.assertEqual(len(containers1), 2)
+ self.assertNamedItemInContainer(containers1, self.container_name + 'a')
+ self.assertNamedItemInContainer(containers1, self.container_name + 'b')
+ self.assertIsNotNone(containers2)
+ self.assertEqual(len(containers2), 2)
+ self.assertNamedItemInContainer(containers2, self.container_name + 'c')
+ self.assertNamedItemInContainer(containers2, self.container_name + 'd')
def test_set_container_metadata(self):
# Arrange
@@ -183,12 +325,19 @@ def test_set_container_metadata_with_non_existing_container(self):
def test_get_container_metadata(self):
# Arrange
self.bc.create_container(self.container_name)
+ self.bc.set_container_acl(self.container_name, None, 'container')
+ self.bc.set_container_metadata(self.container_name, {'hello':'world','foo':'42'})
# Act
md = self.bc.get_container_metadata(self.container_name)
# Assert
self.assertIsNotNone(md)
+ self.assertEquals(md['x-ms-meta-hello'], 'world')
+ self.assertEquals(md['x-ms-meta-foo'], '42')
+ # TODO:
+ # get_container_properties returns container lease information whereas get_container_metadata doesn't
+ # we should lease the container in the arrange section and verify that we do not receive that info
def test_get_container_metadata_with_non_existing_container(self):
# Arrange
@@ -202,12 +351,19 @@ def test_get_container_metadata_with_non_existing_container(self):
def test_get_container_properties(self):
# Arrange
self.bc.create_container(self.container_name)
+ self.bc.set_container_acl(self.container_name, None, 'container')
+ self.bc.set_container_metadata(self.container_name, {'hello':'world','foo':'42'})
# Act
props = self.bc.get_container_properties(self.container_name)
# Assert
self.assertIsNotNone(props)
+ self.assertEquals(props['x-ms-meta-hello'], 'world')
+ self.assertEquals(props['x-ms-meta-foo'], '42')
+ # TODO:
+ # get_container_properties returns container lease information whereas get_container_metadata doesn't
+ # we should lease the container in the arrange section and verify that we receive that info
def test_get_container_properties_with_non_existing_container(self):
# Arrange
@@ -229,6 +385,20 @@ def test_get_container_acl(self):
self.assertIsNotNone(acl)
self.assertEqual(len(acl.signed_identifiers), 0)
+ def test_get_container_acl_iter(self):
+ # Arrange
+ self.bc.create_container(self.container_name)
+
+ # Act
+ acl = self.bc.get_container_acl(self.container_name)
+ for signed_identifier in acl:
+ pass
+
+ # Assert
+ self.assertIsNotNone(acl)
+ self.assertEqual(len(acl.signed_identifiers), 0)
+ self.assertEqual(len(acl), 0)
+
def test_get_container_acl_with_non_existing_container(self):
# Arrange
@@ -274,6 +444,45 @@ def test_set_container_acl_with_public_access_blob(self):
acl = self.bc.get_container_acl(self.container_name)
self.assertIsNotNone(acl)
+ def test_set_container_acl_with_empty_signed_identifiers(self):
+ # Arrange
+ self.bc.create_container(self.container_name)
+
+ # Act
+ identifiers = SignedIdentifiers()
+
+ resp = self.bc.set_container_acl(self.container_name, identifiers)
+
+ # Assert
+ self.assertIsNone(resp)
+ acl = self.bc.get_container_acl(self.container_name)
+ self.assertIsNotNone(acl)
+ self.assertEqual(len(acl.signed_identifiers), 0)
+
+ def test_set_container_acl_with_signed_identifiers(self):
+ # Arrange
+ self.bc.create_container(self.container_name)
+
+ # Act
+ si = SignedIdentifier()
+ si.id = 'testid'
+ si.access_policy.start = '2011-10-11'
+ si.access_policy.expiry = '2011-10-12'
+ si.access_policy.permission = 'r'
+ identifiers = SignedIdentifiers()
+ identifiers.signed_identifiers.append(si)
+
+ resp = self.bc.set_container_acl(self.container_name, identifiers)
+
+ # Assert
+ self.assertIsNone(resp)
+ acl = self.bc.get_container_acl(self.container_name)
+ self.assertIsNotNone(acl)
+ self.assertEqual(len(acl.signed_identifiers), 1)
+ self.assertEqual(len(acl), 1)
+ self.assertEqual(acl.signed_identifiers[0].id, 'testid')
+ self.assertEqual(acl[0].id, 'testid')
+
def test_set_container_acl_with_non_existing_container(self):
# Arrange
@@ -379,8 +588,8 @@ def test_list_blobs(self):
# Arrange
self._create_container(self.container_name)
data = 'hello world'
- resp = self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob')
- resp = self.bc.put_blob(self.container_name, 'blob2', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'blob2', data, 'BlockBlob')
# Act
blobs = self.bc.list_blobs(self.container_name)
@@ -389,8 +598,170 @@ def test_list_blobs(self):
# Assert
self.assertIsNotNone(blobs)
+ self.assertGreaterEqual(len(blobs), 2)
+ self.assertIsNotNone(blobs[0])
self.assertNamedItemInContainer(blobs, 'blob1')
self.assertNamedItemInContainer(blobs, 'blob2')
+ self.assertEqual(blobs[0].properties.content_length, 11)
+ self.assertEqual(blobs[1].properties.content_type, 'application/octet-stream Charset=UTF-8')
+
+ def test_list_blobs_with_prefix(self):
+ # Arrange
+ self._create_container(self.container_name)
+ data = 'hello world'
+ self.bc.put_blob(self.container_name, 'bloba1', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'bloba2', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'blobb1', data, 'BlockBlob')
+
+ # Act
+ blobs = self.bc.list_blobs(self.container_name, 'bloba')
+
+ # Assert
+ self.assertIsNotNone(blobs)
+ self.assertEqual(len(blobs), 2)
+ self.assertNamedItemInContainer(blobs, 'bloba1')
+ self.assertNamedItemInContainer(blobs, 'bloba2')
+
+ def test_list_blobs_with_maxresults(self):
+ # Arrange
+ self._create_container(self.container_name)
+ data = 'hello world'
+ self.bc.put_blob(self.container_name, 'bloba1', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'bloba2', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'bloba3', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'blobb1', data, 'BlockBlob')
+
+ # Act
+ blobs = self.bc.list_blobs(self.container_name, None, None, 2)
+
+ # Assert
+ self.assertIsNotNone(blobs)
+ self.assertEqual(len(blobs), 2)
+ self.assertNamedItemInContainer(blobs, 'bloba1')
+ self.assertNamedItemInContainer(blobs, 'bloba2')
+
+ def test_list_blobs_with_maxresults_and_marker(self):
+ # Arrange
+ self._create_container(self.container_name)
+ data = 'hello world'
+ self.bc.put_blob(self.container_name, 'bloba1', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'bloba2', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'bloba3', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'blobb1', data, 'BlockBlob')
+
+ # Act
+ blobs1 = self.bc.list_blobs(self.container_name, None, None, 2)
+ blobs2 = self.bc.list_blobs(self.container_name, None, blobs1.next_marker, 2)
+
+ # Assert
+ self.assertEqual(len(blobs1), 2)
+ self.assertEqual(len(blobs2), 2)
+ self.assertNamedItemInContainer(blobs1, 'bloba1')
+ self.assertNamedItemInContainer(blobs1, 'bloba2')
+ self.assertNamedItemInContainer(blobs2, 'bloba3')
+ self.assertNamedItemInContainer(blobs2, 'blobb1')
+
+ def test_list_blobs_with_include_snapshots(self):
+ # Arrange
+ self._create_container(self.container_name)
+ data = 'hello world'
+ self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob')
+ self.bc.put_blob(self.container_name, 'blob2', data, 'BlockBlob')
+ self.bc.snapshot_blob(self.container_name, 'blob1')
+
+ # Act
+ blobs = self.bc.list_blobs(self.container_name, include='snapshots')
+
+ # Assert
+ self.assertEqual(len(blobs), 3)
+ self.assertEqual(blobs[0].name, 'blob1')
+ self.assertNotEqual(blobs[0].snapshot, '')
+ self.assertEqual(blobs[1].name, 'blob1')
+ self.assertEqual(blobs[1].snapshot, '')
+ self.assertEqual(blobs[2].name, 'blob2')
+ self.assertEqual(blobs[2].snapshot, '')
+
+ def test_list_blobs_with_include_metadata(self):
+ # Arrange
+ self._create_container(self.container_name)
+ data = 'hello world'
+ self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob', x_ms_meta_name_values={'foo':'1','bar':'bob'})
+ self.bc.put_blob(self.container_name, 'blob2', data, 'BlockBlob', x_ms_meta_name_values={'foo':'2','bar':'car'})
+ self.bc.snapshot_blob(self.container_name, 'blob1')
+
+ # Act
+ blobs = self.bc.list_blobs(self.container_name, include='metadata')
+
+ # Assert
+ self.assertEqual(len(blobs), 2)
+ self.assertEqual(blobs[0].name, 'blob1')
+ self.assertEqual(blobs[0].metadata['foo'], '1')
+ self.assertEqual(blobs[0].metadata['bar'], 'bob')
+ self.assertEqual(blobs[1].name, 'blob2')
+ self.assertEqual(blobs[1].metadata['foo'], '2')
+ self.assertEqual(blobs[1].metadata['bar'], 'car')
+
+ def test_list_blobs_with_include_uncommittedblobs(self):
+ # Arrange
+ self._create_container(self.container_name)
+ data = 'hello world'
+ self.bc.put_block(self.container_name, 'blob1', 'AAA', '1')
+ self.bc.put_block(self.container_name, 'blob1', 'BBB', '2')
+ self.bc.put_block(self.container_name, 'blob1', 'CCC', '3')
+ self.bc.put_blob(self.container_name, 'blob2', data, 'BlockBlob', x_ms_meta_name_values={'foo':'2','bar':'car'})
+
+ # Act
+ blobs = self.bc.list_blobs(self.container_name, include='uncommittedblobs')
+
+ # Assert
+ self.assertEqual(len(blobs), 2)
+ self.assertEqual(blobs[0].name, 'blob1')
+ self.assertEqual(blobs[1].name, 'blob2')
+
+ #def test_list_blobs_with_include_copy(self):
+ # # Arrange
+ # self._create_container(self.container_name)
+ # data = 'hello world'
+ # self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob', x_ms_meta_name_values={'status':'original'})
+ # sourceblob = '/%s/%s/%s' % (credentials.getStorageServicesName(),
+ # self.container_name,
+ # 'blob1')
+ # self.bc.copy_blob(self.container_name, 'blob1copy', sourceblob, {'status':'copy'})
+
+ # # Act
+ # blobs = self.bc.list_blobs(self.container_name, include='copy')
+
+ # # Assert
+ # self.assertEqual(len(blobs), 2)
+ # self.assertEqual(blobs[0].name, 'blob1')
+ # self.assertEqual(blobs[1].name, 'blob2')
+ # #TODO: check for metadata related to copy blob
+
+ def test_list_blobs_with_include_multiple(self):
+ # Arrange
+ self._create_container(self.container_name)
+ data = 'hello world'
+ self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob', x_ms_meta_name_values={'foo':'1','bar':'bob'})
+ self.bc.put_blob(self.container_name, 'blob2', data, 'BlockBlob', x_ms_meta_name_values={'foo':'2','bar':'car'})
+ self.bc.snapshot_blob(self.container_name, 'blob1')
+
+ # Act
+ blobs = self.bc.list_blobs(self.container_name, include='snapshots,metadata')
+
+ # Assert
+ self.assertEqual(len(blobs), 3)
+ self.assertEqual(blobs[0].name, 'blob1')
+ self.assertNotEqual(blobs[0].snapshot, '')
+ self.assertEqual(blobs[0].metadata['foo'], '1')
+ self.assertEqual(blobs[0].metadata['bar'], 'bob')
+ self.assertEqual(blobs[1].name, 'blob1')
+ self.assertEqual(blobs[1].snapshot, '')
+ self.assertEqual(blobs[1].metadata['foo'], '1')
+ self.assertEqual(blobs[1].metadata['bar'], 'bob')
+ self.assertEqual(blobs[2].name, 'blob2')
+ self.assertEqual(blobs[2].snapshot, '')
+ self.assertEqual(blobs[2].metadata['foo'], '2')
+ self.assertEqual(blobs[2].metadata['bar'], 'car')
def test_put_blob_block_blob(self):
# Arrange
@@ -413,6 +784,35 @@ def test_put_blob_page_blob(self):
# Assert
self.assertIsNone(resp)
+ def test_put_blob_with_lease_id(self):
+ # Arrange
+ self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
+ lease = self.bc.lease_blob(self.container_name, 'blob1', 'acquire')
+ lease_id = lease['x-ms-lease-id']
+
+ # Act
+ data = 'hello world again'
+ resp = self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob', x_ms_lease_id=lease_id)
+
+ # Assert
+ self.assertIsNone(resp)
+ blob = self.bc.get_blob(self.container_name, 'blob1', x_ms_lease_id=lease_id)
+ self.assertEqual(blob, 'hello world again')
+
+ def test_put_blob_with_metadata(self):
+ # Arrange
+ self._create_container(self.container_name)
+
+ # Act
+ data = 'hello world'
+ resp = self.bc.put_blob(self.container_name, 'blob1', data, 'BlockBlob', x_ms_meta_name_values={'hello':'world','foo':'42'})
+
+ # Assert
+ self.assertIsNone(resp)
+ md = self.bc.get_blob_metadata(self.container_name, 'blob1')
+ self.assertEquals(md['x-ms-meta-hello'], 'world')
+ self.assertEquals(md['x-ms-meta-foo'], '42')
+
def test_get_blob_with_existing_blob(self):
# Arrange
self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
@@ -421,7 +821,84 @@ def test_get_blob_with_existing_blob(self):
blob = self.bc.get_blob(self.container_name, 'blob1')
# Assert
- self.assertEqual(type(blob), str)
+ self.assertIsInstance(blob, BlobResult)
+ self.assertEquals(blob, 'hello world')
+
+ def test_get_blob_with_snapshot(self):
+ # Arrange
+ self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
+ snapshot = self.bc.snapshot_blob(self.container_name, 'blob1')
+
+ # Act
+ blob = self.bc.get_blob(self.container_name, 'blob1', snapshot['x-ms-snapshot'])
+
+ # Assert
+ self.assertIsInstance(blob, BlobResult)
+ self.assertEquals(blob, 'hello world')
+
+ def test_get_blob_with_snapshot_previous(self):
+ # Arrange
+ self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
+ snapshot = self.bc.snapshot_blob(self.container_name, 'blob1')
+ self.bc.put_blob(self.container_name, 'blob1', 'hello world again', 'BlockBlob')
+
+ # Act
+ blob_previous = self.bc.get_blob(self.container_name, 'blob1', snapshot['x-ms-snapshot'])
+ blob_latest = self.bc.get_blob(self.container_name, 'blob1')
+
+ # Assert
+ self.assertIsInstance(blob_previous, BlobResult)
+ self.assertIsInstance(blob_latest, BlobResult)
+ self.assertEquals(blob_previous, 'hello world')
+ self.assertEquals(blob_latest, 'hello world again')
+
+ def test_get_blob_with_range(self):
+ # Arrange
+ self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
+
+ # Act
+ blob = self.bc.get_blob(self.container_name, 'blob1', x_ms_range='bytes=0-5')
+
+ # Assert
+ self.assertIsInstance(blob, BlobResult)
+ self.assertEquals(blob, 'hello ')
+
+ def test_get_blob_with_range_and_get_content_md5(self):
+ # Arrange
+ self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
+
+ # Act
+ blob = self.bc.get_blob(self.container_name, 'blob1', x_ms_range='bytes=0-5', x_ms_range_get_content_md5='true')
+
+ # Assert
+ self.assertIsInstance(blob, BlobResult)
+ self.assertEquals(blob, 'hello ')
+ self.assertEquals(blob.properties['content-md5'], '+BSJN3e8wilf/wXwDlCNpg==')
+
+ def test_get_blob_with_lease(self):
+ # Arrange
+ self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
+ lease = self.bc.lease_blob(self.container_name, 'blob1', 'acquire')
+ lease_id = lease['x-ms-lease-id']
+
+ # Act
+ blob = self.bc.get_blob(self.container_name, 'blob1', x_ms_lease_id=lease_id)
+ self.bc.lease_blob(self.container_name, 'blob1', 'release', lease_id)
+
+ # Assert
+ self.assertIsInstance(blob, BlobResult)
+ self.assertEquals(blob, 'hello world')
+
+ def test_get_blob_on_leased_blob_without_lease_id(self):
+ # Arrange
+ self._create_container_and_block_blob(self.container_name, 'blob1', 'hello world')
+ self.bc.lease_blob(self.container_name, 'blob1', 'acquire')
+
+ # Act
+ blob = self.bc.get_blob(self.container_name, 'blob1') # get_blob is allowed without lease id
+
+ # Assert
+ self.assertIsInstance(blob, BlobResult)
self.assertEquals(blob, 'hello world')
def test_get_blob_with_non_existing_container(self):
@@ -453,7 +930,7 @@ def test_set_blob_properties_with_existing_blob(self):
# Assert
self.assertIsNone(resp)
props = self.bc.get_blob_properties(self.container_name, 'blob1')
- self.assertEquals(props['Content-Language'], 'spanish')
+ self.assertEquals(props['content-language'], 'spanish')
def test_set_blob_properties_with_non_existing_container(self):
# Arrange
@@ -485,6 +962,7 @@ def test_get_blob_properties_with_existing_blob(self):
self.assertIsNotNone(props)
self.assertEquals(props['x-ms-blob-type'], 'BlockBlob')
self.assertEquals(props['x-ms-lease-status'], 'unlocked')
+ self.assertEquals(props['content-length'], '11')
def test_get_blob_properties_with_non_existing_container(self):
# Arrange
@@ -571,7 +1049,8 @@ def test_snapshot_blob(self):
resp = self.bc.snapshot_blob(self.container_name, 'blob1')
# Assert
- self.assertIsNone(resp)
+ self.assertIsNotNone(resp)
+ self.assertIsNotNone(resp['x-ms-snapshot'])
def test_lease_blob_acquire_and_release(self):
# Arrange
@@ -625,6 +1104,8 @@ def test_put_block_list(self):
# Assert
self.assertIsNone(resp)
+ blob = self.bc.get_blob(self.container_name, 'blob1')
+ self.assertEqual(blob, 'AAABBBCCC')
def test_get_block_list_no_blocks(self):
# Arrange
@@ -724,6 +1205,23 @@ def test_get_page_ranges_2_pages(self):
self.assertEquals(ranges.page_ranges[1].start, 1024)
self.assertEquals(ranges.page_ranges[1].end, 1535)
+ def test_get_page_ranges_iter(self):
+ # Arrange
+ self._create_container_and_page_blob(self.container_name, 'blob1', 2048)
+ data = 'abcdefghijklmnop' * 32
+ resp1 = self.bc.put_page(self.container_name, 'blob1', data, 'bytes=0-511', 'update')
+ resp2 = self.bc.put_page(self.container_name, 'blob1', data, 'bytes=1024-1535', 'update')
+
+ # Act
+ ranges = self.bc.get_page_ranges(self.container_name, 'blob1')
+ for range in ranges:
+ pass
+
+ # Assert
+ self.assertEquals(len(ranges), 2)
+ self.assertIsInstance(ranges[0], PageRange)
+ self.assertIsInstance(ranges[1], PageRange)
+
def test_with_filter(self):
# Single filter
called = []
diff --git a/test/azuretest/test_cloudstorageaccount.py b/test/azuretest/test_cloudstorageaccount.py
new file mode 100644
index 000000000000..01eed4a2af8a
--- /dev/null
+++ b/test/azuretest/test_cloudstorageaccount.py
@@ -0,0 +1,77 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+from azure.storage import *
+from azuretest.util import *
+
+import unittest
+
+#------------------------------------------------------------------------------
+class CloudStorageAccountTest(AzureTestCase):
+
+ def setUp(self):
+ self.account = CloudStorageAccount(account_name=credentials.getStorageServicesName(),
+ account_key=credentials.getStorageServicesKey())
+
+ #--Test cases --------------------------------------------------------
+ def test_create_blob_service(self):
+ # Arrange
+
+ # Act
+ service = self.account.create_blob_service()
+
+ # Assert
+ self.assertIsNotNone(service)
+ self.assertIsInstance(service, BlobService)
+ self.assertEqual(service.account_name, credentials.getStorageServicesName())
+ self.assertEqual(service.account_key, credentials.getStorageServicesKey())
+
+ def test_create_blob_service_empty_credentials(self):
+ # Arrange
+
+ # Act
+ bad_account = CloudStorageAccount('', '')
+ with self.assertRaises(WindowsAzureError):
+ service = bad_account.create_blob_service()
+
+ # Assert
+
+ def test_create_table_service(self):
+ # Arrange
+
+ # Act
+ service = self.account.create_table_service()
+
+ # Assert
+ self.assertIsNotNone(service)
+ self.assertIsInstance(service, TableService)
+ self.assertEqual(service.account_name, credentials.getStorageServicesName())
+ self.assertEqual(service.account_key, credentials.getStorageServicesKey())
+
+ def test_create_queue_service(self):
+ # Arrange
+
+ # Act
+ service = self.account.create_queue_service()
+
+ # Assert
+ self.assertIsNotNone(service)
+ self.assertIsInstance(service, QueueService)
+ self.assertEqual(service.account_name, credentials.getStorageServicesName())
+ self.assertEqual(service.account_key, credentials.getStorageServicesKey())
+
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/azuretest/test_queueservice.py b/test/azuretest/test_queueservice.py
index a04f9c2160d8..c058b6c8a364 100644
--- a/test/azuretest/test_queueservice.py
+++ b/test/azuretest/test_queueservice.py
@@ -1,6 +1,5 @@
-
#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,27 +18,30 @@
from azuretest.util import *
import unittest
-import time
#------------------------------------------------------------------------------
TEST_QUEUE_PREFIX = 'mytestqueue'
#------------------------------------------------------------------------------
-class QueueServiceTest(unittest.TestCase):
+class QueueServiceTest(AzureTestCase):
def setUp(self):
self.queue_client = QueueService(account_name=credentials.getStorageServicesName(),
account_key=credentials.getStorageServicesKey())
- # TODO: it may be overkill to use the machine name from
- # getUniqueTestRunID, current time may be unique enough
+
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.queue_client.set_proxy(proxy_host, proxy_port)
+
__uid = getUniqueTestRunID()
queue_base_name = u'%s' % (__uid)
self.test_queues = []
self.creatable_queues = []
for i in range(10):
- self.test_queues.append(TEST_QUEUE_PREFIX + getUniqueNameBasedOnCurrentTime(queue_base_name))
+ self.test_queues.append(TEST_QUEUE_PREFIX + str(i) + getUniqueNameBasedOnCurrentTime(queue_base_name))
for i in range(4):
- self.creatable_queues.append('mycreatablequeue' + getUniqueNameBasedOnCurrentTime(queue_base_name))
+ self.creatable_queues.append('mycreatablequeue' + str(i) + getUniqueNameBasedOnCurrentTime(queue_base_name))
for queue_name in self.test_queues:
self.queue_client.create_queue(queue_name)
@@ -107,6 +109,24 @@ def test_create_queue(self):
self.assertIsNotNone(result)
self.assertEqual(result['x-ms-approximate-messages-count'], '0')
+ def test_create_queue_already_exist(self):
+ #Action
+ created1 = self.queue_client.create_queue(self.creatable_queues[0])
+ created2 = self.queue_client.create_queue(self.creatable_queues[0])
+
+ #Asserts
+ self.assertTrue(created1)
+ self.assertFalse(created2)
+
+ def test_create_queue_fail_on_exist(self):
+ #Action
+ created = self.queue_client.create_queue(self.creatable_queues[0], None, True)
+ with self.assertRaises(WindowsAzureError):
+ self.queue_client.create_queue(self.creatable_queues[0], None, True)
+
+ #Asserts
+ self.assertTrue(created)
+
def test_create_queue_with_options(self):
#Action
self.queue_client.create_queue(self.creatable_queues[1], x_ms_meta_name_values = {'foo':'test', 'bar':'blah'})
@@ -118,9 +138,34 @@ def test_create_queue_with_options(self):
self.assertEqual('test', result['x-ms-meta-foo'])
self.assertEqual('blah', result['x-ms-meta-bar'])
+ def test_delete_queue_not_exist(self):
+ #Action
+ deleted = self.queue_client.delete_queue(self.creatable_queues[0])
+
+ #Asserts
+ self.assertFalse(deleted)
+
+ def test_delete_queue_fail_not_exist_not_exist(self):
+ #Action
+ with self.assertRaises(WindowsAzureError):
+ self.queue_client.delete_queue(self.creatable_queues[0], True)
+
+ #Asserts
+
+ def test_delete_queue_fail_not_exist_already_exist(self):
+ #Action
+ created = self.queue_client.create_queue(self.creatable_queues[0])
+ deleted = self.queue_client.delete_queue(self.creatable_queues[0], True)
+
+ #Asserts
+ self.assertTrue(created)
+ self.assertTrue(deleted)
+
def test_list_queues(self):
#Action
queues = self.queue_client.list_queues()
+ for queue in queues:
+ pass
#Asserts
self.assertIsNotNone(queues)
@@ -172,7 +217,7 @@ def test_put_message(self):
self.queue_client.put_message(self.test_queues[0], 'message3')
self.queue_client.put_message(self.test_queues[0], 'message4')
- def test_get_messges(self):
+ def test_get_messages(self):
#Action
self.queue_client.put_message(self.test_queues[1], 'message1')
self.queue_client.put_message(self.test_queues[1], 'message2')
diff --git a/test/azuretest/test_servicebusservice.py b/test/azuretest/test_servicebusservice.py
index 46edc29686ec..d5a6ca0dd053 100644
--- a/test/azuretest/test_servicebusservice.py
+++ b/test/azuretest/test_servicebusservice.py
@@ -1,15 +1,17 @@
-#------------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation.
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
#
-# This source code is subject to terms and conditions of the Apache License,
-# Version 2.0. A copy of the license can be found in the License.html file at
-# the root of this distribution. If you cannot locate the Apache License,
-# Version 2.0, please send an email to vspython@microsoft.com. By using this
-# source code in any fashion, you are agreeing to be bound by the terms of the
-# Apache License, Version 2.0.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# You must not remove this notice, or any other, from this software.
-#------------------------------------------------------------------------------
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
from azure import *
from azure.servicebus import *
@@ -18,14 +20,17 @@
import unittest
#------------------------------------------------------------------------------
-class ServiceBusTest(unittest.TestCase):
+class ServiceBusTest(AzureTestCase):
def setUp(self):
self.sbs = ServiceBusService(credentials.getServiceBusNamespace(),
credentials.getServiceBusKey(),
'owner')
- # TODO: it may be overkill to use the machine name from
- # getUniqueTestRunID, current time may be unique enough
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.sbs.set_proxy(proxy_host, proxy_port)
+
__uid = getUniqueTestRunID()
queue_base_name = u'mytestqueue%s' % (__uid)
@@ -48,25 +53,6 @@ def cleanup(self):
except: pass
#--Helpers-----------------------------------------------------------------
-
- # TODO: move this function out of here so other tests can use them
- # TODO: find out how to import/use safe_repr instead repr
- def assertNamedItemInContainer(self, container, item_name, msg=None):
- for item in container:
- if item.name == item_name:
- return
-
- standardMsg = '%s not found in %s' % (repr(item_name), repr(container))
- self.fail(self._formatMessage(msg, standardMsg))
-
- # TODO: move this function out of here so other tests can use them
- # TODO: find out how to import/use safe_repr instead repr
- def assertNamedItemNotInContainer(self, container, item_name, msg=None):
- for item in container:
- if item.name == item_name:
- standardMsg = '%s unexpectedly found in %s' % (repr(item_name), repr(container))
- self.fail(self._formatMessage(msg, standardMsg))
-
def _create_queue(self, queue_name):
self.sbs.create_queue(queue_name, None, True)
@@ -84,6 +70,44 @@ def _create_topic_and_subscription(self, topic_name, subscription_name):
def _create_subscription(self, topic_name, subscription_name):
self.sbs.create_subscription(topic_name, subscription_name, None, True)
+ #--Test cases for service bus service -------------------------------------
+ def test_create_service_bus_missing_arguments(self):
+ # Arrange
+ if os.environ.has_key(AZURE_SERVICEBUS_NAMESPACE):
+ del os.environ[AZURE_SERVICEBUS_NAMESPACE]
+ if os.environ.has_key(AZURE_SERVICEBUS_ACCESS_KEY):
+ del os.environ[AZURE_SERVICEBUS_ACCESS_KEY]
+ if os.environ.has_key(AZURE_SERVICEBUS_ISSUER):
+ del os.environ[AZURE_SERVICEBUS_ISSUER]
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ sbs = ServiceBusService()
+
+ # Assert
+
+ def test_create_service_bus_env_variables(self):
+ # Arrange
+ os.environ[AZURE_SERVICEBUS_NAMESPACE] = credentials.getServiceBusNamespace()
+ os.environ[AZURE_SERVICEBUS_ACCESS_KEY] = credentials.getServiceBusKey()
+ os.environ[AZURE_SERVICEBUS_ISSUER] = 'owner'
+
+ # Act
+ sbs = ServiceBusService()
+
+ if os.environ.has_key(AZURE_SERVICEBUS_NAMESPACE):
+ del os.environ[AZURE_SERVICEBUS_NAMESPACE]
+ if os.environ.has_key(AZURE_SERVICEBUS_ACCESS_KEY):
+ del os.environ[AZURE_SERVICEBUS_ACCESS_KEY]
+ if os.environ.has_key(AZURE_SERVICEBUS_ISSUER):
+ del os.environ[AZURE_SERVICEBUS_ISSUER]
+
+ # Assert
+ self.assertIsNotNone(sbs)
+ self.assertEquals(sbs.service_namespace, credentials.getServiceBusNamespace())
+ self.assertEquals(sbs.account_key, credentials.getServiceBusKey())
+ self.assertEquals(sbs.issuer, 'owner')
+
#--Test cases for queues --------------------------------------------------
def test_create_queue_no_options(self):
# Arrange
@@ -108,12 +132,33 @@ def test_create_queue_with_options(self):
# Act
queue_options = Queue()
- queue_options.max_size_in_megabytes = 5120
queue_options.default_message_time_to_live = 'PT1M'
+ queue_options.duplicate_detection_history_time_window = 'PT5M'
+ queue_options.enable_batched_operations = False
+ queue_options.dead_lettering_on_message_expiration = False
+ queue_options.lock_duration = 'PT1M'
+ queue_options.max_delivery_count = 15
+ queue_options.max_size_in_megabytes = 5120
+ queue_options.message_count = 0
+ queue_options.requires_duplicate_detection = False
+ queue_options.requires_session = False
+ queue_options.size_in_bytes = 0
created = self.sbs.create_queue(self.queue_name, queue_options)
# Assert
self.assertTrue(created)
+ queue = self.sbs.get_queue(self.queue_name)
+ self.assertEquals('PT1M', queue.default_message_time_to_live)
+ self.assertEquals('PT5M', queue.duplicate_detection_history_time_window)
+ self.assertEquals(False, queue.enable_batched_operations)
+ self.assertEquals(False, queue.dead_lettering_on_message_expiration)
+ self.assertEquals('PT1M', queue.lock_duration)
+ self.assertEquals(15, queue.max_delivery_count)
+ self.assertEquals(5120, queue.max_size_in_megabytes)
+ self.assertEquals(0, queue.message_count)
+ self.assertEquals(False, queue.requires_duplicate_detection)
+ self.assertEquals(False, queue.requires_session)
+ self.assertEquals(0, queue.size_in_bytes)
def test_create_queue_with_already_existing_queue(self):
# Arrange
@@ -319,7 +364,14 @@ def test_send_queue_message_with_custom_message_properties(self):
self._create_queue(self.queue_name)
# Act
- sent_msg = Message('message with properties', custom_properties={'hello':'world', 'foo':42})
+ props = {'hello':'world',
+ 'foo':42,
+ 'active':True,
+ 'deceased':False,
+ 'large':8555111000,
+ 'floating':3.14,
+ 'dob':datetime(2011, 12, 14)}
+ sent_msg = Message('message with properties', custom_properties=props)
self.sbs.send_queue_message(self.queue_name, sent_msg)
received_msg = self.sbs.receive_queue_message(self.queue_name, True, 5)
received_msg.delete()
@@ -327,7 +379,12 @@ def test_send_queue_message_with_custom_message_properties(self):
# Assert
self.assertIsNotNone(received_msg)
self.assertEquals(received_msg.custom_properties['hello'], 'world')
- self.assertEquals(received_msg.custom_properties['foo'], '42') # TODO: note that the integer became a string
+ self.assertEquals(received_msg.custom_properties['foo'], 42)
+ self.assertEquals(received_msg.custom_properties['active'], True)
+ self.assertEquals(received_msg.custom_properties['deceased'], False)
+ self.assertEquals(received_msg.custom_properties['large'], 8555111000)
+ self.assertEquals(received_msg.custom_properties['floating'], 3.14)
+ self.assertEquals(received_msg.custom_properties['dob'], datetime(2011, 12, 14))
#--Test cases for topics/subscriptions ------------------------------------
def test_create_topic_no_options(self):
@@ -353,12 +410,24 @@ def test_create_topic_with_options(self):
# Act
topic_options = Topic()
- topic_options.max_size_in_megabytes = '5120'
topic_options.default_message_time_to_live = 'PT1M'
+ topic_options.duplicate_detection_history_time_window = 'PT5M'
+ topic_options.enable_batched_operations = False
+ topic_options.max_size_in_megabytes = 5120
+ topic_options.requires_duplicate_detection = False
+ topic_options.size_in_bytes = 0
+ #TODO: MaximumNumberOfSubscriptions is not supported?
created = self.sbs.create_topic(self.topic_name, topic_options)
# Assert
self.assertTrue(created)
+ topic = self.sbs.get_topic(self.topic_name)
+ self.assertEquals('PT1M', topic.default_message_time_to_live)
+ self.assertEquals('PT5M', topic.duplicate_detection_history_time_window)
+ self.assertEquals(False, topic.enable_batched_operations)
+ self.assertEquals(5120, topic.max_size_in_megabytes)
+ self.assertEquals(False, topic.requires_duplicate_detection)
+ self.assertEquals(0, topic.size_in_bytes)
def test_create_topic_with_already_existing_topic(self):
# Arrange
@@ -382,6 +451,23 @@ def test_create_topic_with_already_existing_topic_fail_on_exist(self):
# Assert
self.assertTrue(created)
+ def test_topic_backwards_compatibility_warning(self):
+ # Arrange
+ topic_options = Topic()
+ topic_options.max_size_in_megabytes = 5120
+
+ # Act
+ val = topic_options.max_size_in_mega_bytes
+
+ # Assert
+ self.assertEqual(val, 5120)
+
+ # Act
+ topic_options.max_size_in_mega_bytes = 1024
+
+ # Assert
+ self.assertEqual(topic_options.max_size_in_megabytes, 1024)
+
def test_get_topic_with_existing_topic(self):
# Arrange
self._create_topic(self.topic_name)
@@ -467,6 +553,35 @@ def test_create_subscription(self):
# Assert
self.assertTrue(created)
+ def test_create_subscription_with_options(self):
+ # Arrange
+ self._create_topic(self.topic_name)
+
+ # Act
+ subscription_options = Subscription()
+ subscription_options.dead_lettering_on_filter_evaluation_exceptions = False
+ subscription_options.dead_lettering_on_message_expiration = False
+ subscription_options.default_message_time_to_live = 'PT15M'
+ subscription_options.enable_batched_operations = False
+ subscription_options.lock_duration = 'PT1M'
+ subscription_options.max_delivery_count = 15
+ #message_count is read-only
+ subscription_options.message_count = 0
+ subscription_options.requires_session = False
+ created = self.sbs.create_subscription(self.topic_name, 'MySubscription', subscription_options)
+
+ # Assert
+ self.assertTrue(created)
+ subscription = self.sbs.get_subscription(self.topic_name, 'MySubscription')
+ self.assertEquals(False, subscription.dead_lettering_on_filter_evaluation_exceptions)
+ self.assertEquals(False, subscription.dead_lettering_on_message_expiration)
+ self.assertEquals('PT15M', subscription.default_message_time_to_live)
+ self.assertEquals(False, subscription.enable_batched_operations)
+ self.assertEquals('PT1M', subscription.lock_duration)
+ #self.assertEquals(15, subscription.max_delivery_count) #no idea why max_delivery_count is always 10
+ self.assertEquals(0, subscription.message_count)
+ self.assertEquals(False, subscription.requires_session)
+
def test_create_subscription_fail_on_exist(self):
# Arrange
self._create_topic(self.topic_name)
@@ -630,7 +745,7 @@ def test_create_rule_with_already_existing_rule_fail_on_exist(self):
# Assert
self.assertTrue(created)
- def test_create_rule_with_options(self):
+ def test_create_rule_with_options_sql_filter(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
@@ -643,6 +758,71 @@ def test_create_rule_with_options(self):
# Assert
self.assertTrue(created)
+ def test_create_rule_with_options_true_filter(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ rule1 = Rule()
+ rule1.filter_type = 'TrueFilter'
+ rule1.filter_expression = '1=1'
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', rule1)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_rule_with_options_false_filter(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ rule1 = Rule()
+ rule1.filter_type = 'FalseFilter'
+ rule1.filter_expression = '1=0'
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', rule1)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_rule_with_options_correlation_filter(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ rule1 = Rule()
+ rule1.filter_type = 'CorrelationFilter'
+ rule1.filter_expression = 'myid'
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', rule1)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_rule_with_options_empty_rule_action(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ rule1 = Rule()
+ rule1.action_type = 'EmptyRuleAction'
+ rule1.action_expression = ''
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', rule1)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_rule_with_options_sql_rule_action(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+
+ # Act
+ rule1 = Rule()
+ rule1.action_type = 'SqlRuleAction'
+ rule1.action_expression = "SET foo = 5"
+ created = self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', rule1)
+
+ # Assert
+ self.assertTrue(created)
+
def test_list_rules(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
@@ -675,6 +855,27 @@ def test_get_rule_with_non_existing_rule(self):
# Assert
+ def test_get_rule_with_existing_rule_with_options(self):
+ # Arrange
+ self._create_topic_and_subscription(self.topic_name, 'MySubscription')
+ sent_rule = Rule()
+ sent_rule.filter_type = 'SqlFilter'
+ sent_rule.filter_expression = 'foo > 40'
+ sent_rule.action_type = 'SqlRuleAction'
+ sent_rule.action_expression = 'SET foo = 5'
+ self.sbs.create_rule(self.topic_name, 'MySubscription', 'MyRule1', sent_rule)
+
+ # Act
+ received_rule = self.sbs.get_rule(self.topic_name, 'MySubscription', 'MyRule1')
+
+ # Assert
+ self.assertIsNotNone(received_rule)
+ self.assertEquals(received_rule.name, 'MyRule1')
+ self.assertEquals(received_rule.filter_type, sent_rule.filter_type)
+ self.assertEquals(received_rule.filter_expression, sent_rule.filter_expression)
+ self.assertEquals(received_rule.action_type, sent_rule.action_type)
+ self.assertEquals(received_rule.action_expression, sent_rule.action_expression)
+
def test_delete_rule_with_existing_rule(self):
# Arrange
self._create_topic_and_subscription(self.topic_name, 'MySubscription')
diff --git a/test/azuretest/test_servicemanagementservice.py b/test/azuretest/test_servicemanagementservice.py
new file mode 100644
index 000000000000..d3382dae9c5d
--- /dev/null
+++ b/test/azuretest/test_servicemanagementservice.py
@@ -0,0 +1,1754 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+from azure import *
+from azure.servicemanagement import *
+from azure.storage.blobservice import *
+from azuretest.util import *
+
+import unittest
+import base64
+
+MANAGEMENT_CERT_PUBLICKEY = 'MIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQAB'
+MANAGEMENT_CERT_DATA = 'MIIC9jCCAeKgAwIBAgIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAMBUxEzARBgNVBAMTClB5dGhvblRlc3QwHhcNMTIwODMwMDAyNTMzWhcNMzkxMjMxMjM1OTU5WjAVMRMwEQYDVQQDEwpQeXRob25UZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQABo0owSDBGBgNVHQEEPzA9gBBS6knRHo54LppngxVCCzZVoRcwFTETMBEGA1UEAxMKUHl0aG9uVGVzdIIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAA4IBAQAnZbP3YV+08wI4YTg6MOVA+j1njd0kVp35FLehripmaMNE6lgk3Vu1MGGl0JnvMr3fNFGFzRske/jVtFxlHE5H/CoUzmyMQ+W06eV/e995AduwTKsS0ZgYn0VoocSXWst/nyhpKOcbJgAOohOYxgsGI1JEqQgjyeqzcCIhw/vlWiA3V8bSiPnrC9vwhH0eB025hBd2VbEGDz2nWCYkwtuOLMTvkmLi/oFw3GOfgagZKk8k/ZPffMCafz+yR3vb1nqAjncrVcJLI8amUfpxhjZYexo8MbxBA432M6w8sjXN+uLCl7ByWZ4xs4vonWgkmjeObtU37SIzolHT4dxIgaP2'
+
+SERVICE_CERT_FORMAT = 'pfx'
+SERVICE_CERT_PASSWORD = 'Python'
+SERVICE_CERT_DATA = 'MIIJ7AIBAzCCCagGCSqGSIb3DQEHAaCCCZkEggmVMIIJkTCCBfoGCSqGSIb3DQEHAaCCBesEggXnMIIF4zCCBd8GCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAhxOU59DvbmnAICB9AEggTYNM2UfOCtA1G0fhKNmu79z8/yUm5ybh5JamZqZ4Ra21wTc1khmVmWr0OAYhttaKtqtHfyFv7UY/cojg+fdOPCI+Fa8qQI7oXGEU7hS4O7VH3R/bDESctPB4TRdhjb88hLC+CdQc64PwjHFoaUHEQHFMsi7ujbi1u4Xg8YRqg4eKoG0AAraEQgyS3+1oWndtUOdqvOAsAG/bshiK47pgxMTgHpYjtOMtjcPqrwYq5aZQNWdJMXjl4JnmGJpO1dGqlSyr3uJuPobuq18diFS+JMJk/nQt50GF/SkscQn3TCLc6g6AjuKqdnSQTM34eNkZanKyyBuRmVUvM+zcKP6riiRDB86wrfNcT3sPDh9x6BSiTaxWKDk4IziWUMy8WJ/qItaVm2klIyez9JeEgcN2PhI2B1SFxH2qliyCmJ+308RFJHlQZDNZhpTRNgkulYfiswr5xOVEcU7J6eithmmD72xANSiiTbtFH10Bu10FN4SbSvOYQiGIjDVG4awAPVC9gURm88PciIimz1ne0WN3Ioj92BTC78kNoMI7+NDiVV01W+/CNK8J1WCTkKWRxTui8Ykm2z63gh9KmSZyEstFDFIz2WbJEKM8N4vjzGpNhRYOHpxFaCm2E/yoNj4MyHmo9XGtYsqhA0Jy12Wmx/fVGeZb3Az8Y5MYCQasc9XwvzACf2+RKsz6ey7jTb+Exo0gQB13PNFLEs83R57bDa8vgQupYBFcsamw/RvqmXn8sGw53kd71VVElrfaCNluvAFrLPdaH3F/+J8KHdV7Xs9A1ITvgpHbw2BnQBPwH3pSXZYh5+7it6WSNIHbv8h33Ue+vPLby5Huhg86R4nZkjJbeQXsfVpvC+llhOBHUX+UJth76a/d0iAewPO90rDNx+Nqff+Q7hPoUgxE8HtrbhZNY3qNFfyRGLbCZJpb+6DE7WsDSogFE5gY7gnmJwtT+FBlIocysaBn1NMH8fo/2zyuAOUfjHvuIR+K/NzcMdn5WL7bYjmvJwRIAaPScZV56NzNbZdHsHAU2ujvE+sGNmwr4wz3Db6Q9VfzkNWEzDmRlYEsRYNqQ/E7O2KQWETzZSGTEXgz57APE0d/cOprX+9PXZTdqqjOCU12YLtJobIcBZz+AFPMJRlY+pjuIu8wTzbWX7yoek3zmN9iZAZT5gNYCwIwo06Of2gvgssQ4X53QmJc/oD6WSyZpcS67JOQ8bHXIT1Lg9FBAfgXWEQ+BwIBK1SEJYlZJm0JkJ3Og7t3rgAmuv5YOfbFLo484946izfQeoUF5qrn/qSiqNOnYNMLvaXWT2pWE9V6u8max0l5dA5qNR772ahMQEH1iZu/K8gKfQ/z6Ea1yxFVwGtf9uNSuvS2M3MFa4Dos8FtxxQgOIEoiV4qc2yQIyiAKYusRI+K3PMnqSyg9S3eh0LCbuI8CYESpolrFCMyNFSwJpM+pUDA5GkRM/gYGLAhtZtLxgZBZYn81DgiRmk4igRIjNKWcy5l0eWN5KPBQve0QVXFB9z0A2GqOGEHJTZS5rww61hVaNyp2nBa8Mrd9afnogoEcb1SBRsU5QTsP91XGj8zdljL2t+jJDNUxi6nbNQN6onRY1ewpdCKxFzFyR/75nrEPBd8UrDTZ7k/FcNxIlAA2KPH2Dt3r8EZfEKDGBzTATBgkqhkiG9w0BCRUxBgQEAQAAADBXBgkqhkiG9w0BCRQxSh5IAGUANAA1ADcAOQAyAGYAYQAtAGUAMQA3AGUALQA0ADEAMgAzAC0AOQBiAGYANwAtADEAZQBjADkAMQA4ADMAOQAxAGIAOAAxMF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABvAGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIwggOPBgkqhkiG9w0BBwagggOAMIIDfAIBADCCA3UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECLA43UrS9nGWAgIH0ICCA0isAHOSVK2C8XAZpu2dTiJfB51QqgbUuZ4QdPu+INKT3x5x775SMC2wbFEjvjhA3hys6D/ALV4q97JpKc6YUDZMP4zl2yYx6Pr6chTudRCwlrAKqk0Sp0IBZrxZBVBgRsz9pt3VRR9bI9ElHD8j/ahZ+Hx+mxlfUePrabOqlzw9FVmrqBIhhmAs9Ax0l5mvY3p7ww1Vm0K2sVdOZdsKx27Cf7rg4rC6JJ3tPvTfJDUkTCPFgFtam+vZSiMoYbz00Kj2uPBJbkpG2ngjK8ONHzWq8PF6K6Feut5vrjeswR/bm9gGPtrjAU0qBuP5YfJqei6zvs+hXzYOcnnhxFlfHz/QvVJM9losSm17kq0SSqG4HD1XF6C6eiH3pySa2mnw3kEivulBYFUO2jmSGroNlwz6/LVoM+801h0vJayFxP7xRntQr0z5agzyNfCZ8249dgJ4y2UJmSRArdv5h+gYXIra2pNRHVUfPFTIZw3Yf5Uhz83ta3JxIM0BCtwQBsWpJSs3q9tokLQa/wJY6Qj5pVw3pxv+497DrOVCiCwAI3GVTa0QylscKFMnEjxIpYCLDNnY0fRXDYA94AfhDkdjlXLMFZLuwRrfTHqfyaDuFdq9cT2FuhM1J73reMriMGfu+UzTTWd4UZa/mGGRZM9eWvrIvgkvLQr+T250wa7igbJwh3FXRm7TqZSkLOpW3p+Losw0GJIz2k5DW61gkPYY0hMwzpniDrN8pc5BCo8Wtb4UBfW5+J5oQn2oKj2B3BuflL+jgYjXb6YRe1TTstJWmTR4/CrZc2ecNHTMGYlr7bOptaGcw9z/JaCjdoElUNSITVj6TQCa//jko+tdbM1cCtzE7Ty8ARs2XghxbhgLV5KyYZ0q06/tYvaT0vx4PZi64X1weIEmcHJRgdz9dC3+8SrtABoxxft9MD7DvtRNcWiZ+qdKfKEsGgZXYAPgYg/xObaiR9Sz2QGYv1BqoNAtalJLscn7UmGZnzjgyvD3GpvxPnZIZr3pAAyWZKUsL7eFCDjwJu/DlUni31ZI0sNJvcJZkWl5gGtuoTf3q4v80wKlNFVsUCrWRosITNlQun8Q+0NR6MZp8vvMKfRnJr7CkcZOAa7rzZjGF+EwOzAfMAcGBSsOAwIaBBQyyvu2Rm6lFW3e9sQk83bjO1g2pAQU8PYpZ4LXqCe9cmNgCFNqmt4fCOQCAgfQ'
+SERVICE_CERT_DATA_PUBLIC = 'MIIC9jCCAeKgAwIBAgIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAMBUxEzARBgNVBAMTClB5dGhvblRlc3QwHhcNMTIwODMwMDAyNTMzWhcNMzkxMjMxMjM1OTU5WjAVMRMwEQYDVQQDEwpQeXRob25UZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjULNM53WPLkht1rbrDob/e4hZTHzj/hlLoBt2X3cNRc6dOPsMucxbMdchbCqAFa5RIaJvF5NDKqZuUSwq6bttD71twzy9bQ03EySOcRBad1VyqAZQ8DL8nUGSnXIUh+tpz4fDGM5f3Ly9NX8zfGqG3sT635rrFlUp3meJC+secCCwTLOOcIs3KQmuB+pMB5Y9rPhoxcekFfpq1pKtis6pmxnVbiL49kr6UUL6RQRDwik4t1jttatXLZqHETTmXl0Y0wS5AcJUXVAn5AL2kybULoThop2v01/E0NkPtFPAqLVs/kKBahniNn9uwUo+LS9FA8rWGu0FY4CZEYDfhb+QIDAQABo0owSDBGBgNVHQEEPzA9gBBS6knRHo54LppngxVCCzZVoRcwFTETMBEGA1UEAxMKUHl0aG9uVGVzdIIQ00IFaqV9VqVJxI+wZka0szAJBgUrDgMCHQUAA4IBAQAnZbP3YV+08wI4YTg6MOVA+j1njd0kVp35FLehripmaMNE6lgk3Vu1MGGl0JnvMr3fNFGFzRske/jVtFxlHE5H/CoUzmyMQ+W06eV/e995AduwTKsS0ZgYn0VoocSXWst/nyhpKOcbJgAOohOYxgsGI1JEqQgjyeqzcCIhw/vlWiA3V8bSiPnrC9vwhH0eB025hBd2VbEGDz2nWCYkwtuOLMTvkmLi/oFw3GOfgagZKk8k/ZPffMCafz+yR3vb1nqAjncrVcJLI8amUfpxhjZYexo8MbxBA432M6w8sjXN+uLCl7ByWZ4xs4vonWgkmjeObtU37SIzolHT4dxIgaP2'
+SERVICE_CERT_THUMBPRINT = 'BEA4B74BD6B915E9DD6A01FB1B8C3C1740F517F2'
+SERVICE_CERT_THUMBALGO = 'sha1'
+
+DEPLOYMENT_ORIGINAL_CONFIG = '''<ServiceConfiguration serviceName="WindowsAzure1" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*" schemaVersion="2012-05.1.7">
+  <Role name="WorkerRole1">
+    <Instances count="2" />
+    <ConfigurationSettings>
+      <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" />
+    </ConfigurationSettings>
+  </Role>
+</ServiceConfiguration>'''
+
+DEPLOYMENT_UPDATE_CONFIG = '''<ServiceConfiguration serviceName="WindowsAzure1" xmlns="http://schemas.microsoft.com/ServiceHosting/2008/10/ServiceConfiguration" osFamily="1" osVersion="*" schemaVersion="2012-05.1.7">
+  <Role name="WorkerRole1">
+    <Instances count="4" />
+    <ConfigurationSettings>
+      <Setting name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" value="UseDevelopmentStorage=true" />
+    </ConfigurationSettings>
+  </Role>
+</ServiceConfiguration>'''
+
+CSPKG_PATH = 'azuretest/data/WindowsAzure1.cspkg'
+DATA_VHD_PATH = 'azuretest/data/test.vhd'
+
+LINUX_IMAGE_NAME = 'OpenLogic__OpenLogic-CentOS-62-20120531-en-us-30GB.vhd'
+WINDOWS_IMAGE_NAME = 'MSFT__Win2K8R2SP1-Datacenter-201208.01-en.us-30GB.vhd'
+
+# This blob must be created manually before running the unit tests;
+# it must be present in the storage account listed in the credentials file.
+LINUX_OS_VHD_URL = credentials.getLinuxOSVHD()
+
+# The easiest way to create a Linux OS vhd is to use the Azure management
+# portal to create a Linux VM, and have it store the VHD in the
+# storage account listed in the credentials file. Then stop the VM,
+# and use the following code to copy the VHD to another blob (if you
+# try to use the VM's VHD directly without making a copy, you will get
+# conflict errors).
+
+#sourceblob = '/%s/%s/%s' % (credentials.getStorageServicesName(), 'vhdcontainername', 'vhdblobname')
+#self.bc.copy_blob('vhdcontainername', 'targetvhdblobname', sourceblob)
+
+
+#------------------------------------------------------------------------------
+class ServiceManagementServiceTest(AzureTestCase):
+
+ def setUp(self):
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+
+ self.sms = ServiceManagementService(credentials.getSubscriptionId(), credentials.getManagementCertFile())
+ if proxy_host:
+ self.sms.set_proxy(proxy_host, proxy_port)
+
+ self.bc = BlobService(account_name=credentials.getStorageServicesName(), account_key=credentials.getStorageServicesKey())
+ if proxy_host:
+ self.bc.set_proxy(proxy_host, proxy_port)
+
+ self.affinity_group_name = getUniqueNameBasedOnCurrentTime('utaffgrp')
+ self.management_certificate_name = getUniqueNameBasedOnCurrentTime('utmgmtcert')
+ self.hosted_service_name = getUniqueNameBasedOnCurrentTime('utsvc')
+ self.storage_account_name = getUniqueNameBasedOnCurrentTime('utstorage')
+ self.container_name = getUniqueNameBasedOnCurrentTime('utctnr')
+ self.disk_name = getUniqueNameBasedOnCurrentTime('utdisk')
+ self.os_image_name = getUniqueNameBasedOnCurrentTime('utosimg')
+
+ self.data_disk_info = None
+
+ def tearDown(self):
+ if self.data_disk_info is not None:
+ try:
+ disk = self.sms.get_data_disk(self.data_disk_info[0], self.data_disk_info[1], self.data_disk_info[2], self.data_disk_info[3])
+ try:
+ result = self.sms.delete_data_disk(self.data_disk_info[0], self.data_disk_info[1], self.data_disk_info[2], self.data_disk_info[3])
+ self._wait_for_async(result.request_id)
+ except: pass
+ try:
+ self.sms.delete_disk(disk.disk_name)
+ except: pass
+ except: pass
+
+ disk_names = [self.disk_name]
+
+ try:
+ # Can't delete a hosted service if it has deployments, so delete those first
+ props = self.sms.get_hosted_service_properties(self.hosted_service_name, True)
+ for deployment in props.deployments:
+ try:
+ for role in deployment.role_list:
+ role_props = self.sms.get_role(self.hosted_service_name, deployment.name, role.role_name)
+ if role_props.os_virtual_hard_disk.disk_name not in disk_names:
+ disk_names.append(role_props.os_virtual_hard_disk.disk_name)
+ except: pass
+
+ try:
+ result = self.sms.delete_deployment(self.hosted_service_name, deployment.name)
+ self._wait_for_async(result.request_id)
+ except: pass
+ self.sms.delete_hosted_service(self.hosted_service_name)
+ except: pass
+
+ try:
+ self.sms.delete_storage_account(self.storage_account_name)
+ except: pass
+
+ try:
+ self.sms.delete_affinity_group(self.affinity_group_name)
+ except: pass
+
+ try:
+ self.sms.delete_management_certificate(self.management_certificate_name)
+ except: pass
+
+ try:
+ result = self.sms.delete_os_image(self.os_image_name)
+ self._wait_for_async(result.request_id)
+ except: pass
+
+ for disk_name in disk_names:
+ try:
+ self.sms.delete_disk(disk_name)
+ except: pass
+
+ try:
+ self.bc.delete_container(self.container_name)
+ except: pass
+
+ #--Helpers-----------------------------------------------------------------
+ def _wait_for_async(self, request_id):
+ result = self.sms.get_operation_status(request_id)
+ while result.status == 'InProgress':
+ time.sleep(5)
+ result = self.sms.get_operation_status(request_id)
+ self.assertEqual(result.status, 'Succeeded')
+
+ def _wait_for_deployment_status(self, service_name, deployment_name, status):
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ while props.status != status:
+ time.sleep(5)
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+
+ def _wait_for_role_instance_status(self, service_name, deployment_name, role_instance_name, status):
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ while self._get_role_instance_status(props, role_instance_name) != status:
+ time.sleep(5)
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+
+ def _wait_for_rollback_allowed(self, service_name, deployment_name):
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ while props.rollback_allowed == False:
+ time.sleep(5)
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+
+ def _get_role_instance_status(self, deployment, role_instance_name):
+ for role_instance in deployment.role_instance_list:
+ if role_instance.instance_name == role_instance_name:
+ return role_instance.instance_status
+ return None
+
+ def _create_hosted_service(self, name):
+ result = self.sms.create_hosted_service(name, name + 'label', name + 'description', 'West US', None, {'ext1':'val1', 'ext2':42})
+ self.assertIsNone(result)
+
+ def _hosted_service_exists(self, name):
+ try:
+ props = self.sms.get_hosted_service_properties(name)
+ return props is not None
+ except:
+ return False
+
+ def _create_service_certificate(self, service_name, data, format, password):
+ result = self.sms.add_service_certificate(service_name, data, format, password)
+ self._wait_for_async(result.request_id)
+
+ def _service_certificate_exists(self, service_name, thumbalgorithm, thumbprint):
+ try:
+ props = self.sms.get_service_certificate(service_name, thumbalgorithm, thumbprint)
+ return props is not None
+ except:
+ return False
+
+ def _deployment_exists(self, service_name, deployment_name):
+ try:
+ props = self.sms.get_deployment_by_name(service_name, deployment_name)
+ return props is not None
+ except:
+ return False
+
+ def _create_container_and_block_blob(self, container_name, blob_name, blob_data):
+ self.bc.create_container(container_name, None, 'container', False)
+ resp = self.bc.put_blob(container_name, blob_name, blob_data, 'BlockBlob')
+ self.assertIsNone(resp)
+
+ def _create_container_and_page_blob(self, container_name, blob_name, content_length):
+ self.bc.create_container(container_name, None, 'container', False)
+ resp = self.bc.put_blob(container_name, blob_name, '', 'PageBlob', x_ms_blob_content_length=str(content_length))
+ self.assertIsNone(resp)
+
+ def _upload_file_to_block_blob(self, file_path, blob_name):
+ data = open(file_path, 'rb').read()
+ url = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + blob_name
+ self._create_container_and_block_blob(self.container_name, blob_name, data)
+ return url
+
+ def _upload_chunks(self, file_path, blob_name, chunk_size):
+ index = 0
+ with open(file_path, 'rb') as f:
+ while True:
+ data = f.read(chunk_size)
+ if data:
+ length = len(data)
+ self.bc.put_page(self.container_name, blob_name, data, 'bytes=' + str(index) + '-' + str(index + length - 1), 'update')
+ index += length
+ else:
+ break
+
+ def _upload_file_to_page_blob(self, file_path, blob_name):
+ url = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + blob_name
+ content_length = os.path.getsize(file_path)
+ self._create_container_and_page_blob(self.container_name, blob_name, content_length)
+ self._upload_chunks(file_path, blob_name, 262144)
+ return url
+
+ def _upload_default_package_to_storage_blob(self, blob_name):
+ return self._upload_file_to_block_blob(CSPKG_PATH, blob_name)
+
+ def _upload_disk_to_storage_blob(self, blob_name):
+ return self._upload_file_to_page_blob(DATA_VHD_PATH, blob_name)
+
+ def _add_deployment(self, service_name, deployment_name, deployment_slot='Production'):
+ configuration = base64.b64encode(DEPLOYMENT_ORIGINAL_CONFIG)
+ package_url = self._upload_default_package_to_storage_blob(deployment_name + 'Blob')
+ result = self.sms.create_deployment(service_name, deployment_slot, deployment_name, package_url, deployment_name + 'label', configuration, False, False, { 'dep1':'val1', 'dep2':'val2'})
+ self._wait_for_async(result.request_id)
+
+ def _create_hosted_service_with_deployment(self, service_name, deployment_name):
+ self._create_hosted_service(service_name)
+ self._add_deployment(service_name, deployment_name)
+
+ def _create_affinity_group(self, name):
+ result = self.sms.create_affinity_group(name, 'tstmgmtaffgrp', 'West US', 'tstmgmt affinity group')
+ self.assertIsNone(result)
+
+ def _affinity_group_exists(self, name):
+ try:
+ props = self.sms.get_affinity_group_properties(name)
+ return props is not None
+ except:
+ return False
+
+ def _create_management_certificate(self, thumbprint):
+ result = self.sms.add_management_certificate(MANAGEMENT_CERT_PUBLICKEY, thumbprint, MANAGEMENT_CERT_DATA)
+ self.assertIsNone(result)
+
+ def _management_certificate_exists(self, thumbprint):
+ try:
+ props = self.sms.get_management_certificate(thumbprint)
+ return props is not None
+ except:
+ return False
+
+ def _create_storage_account(self, name):
+ result = self.sms.create_storage_account(name, name + 'description', name + 'label', None, 'West US', False, {'ext1':'val1', 'ext2':42})
+ self._wait_for_async(result.request_id)
+
+ def _storage_account_exists(self, name):
+ try:
+ props = self.sms.get_storage_account_properties(name)
+ return props is not None
+ except:
+ return False
+
+ def _role_exists(self, service_name, deployment_name, role_name):
+ try:
+ props = self.sms.get_role(service_name, deployment_name, role_name)
+ return props is not None
+ except:
+ return False
+
+ def _create_disk(self, disk_name, os, url):
+ result = self.sms.add_disk(False, disk_name, url, disk_name, os)
+ self.assertIsNone(result)
+
+ def _disk_exists(self, disk_name):
+ try:
+ disk = self.sms.get_disk(disk_name)
+ return disk is not None
+ except:
+ return False
+
+ def _create_os_image(self, name, blob_url, os):
+ result = self.sms.add_os_image(name + 'label', blob_url, name, os)
+ self._wait_for_async(result.request_id)
+
+ def _os_image_exists(self, image_name):
+ try:
+ image = self.sms.get_os_image(image_name)
+ return image is not None
+ except:
+ return False
+
+ def _blob_exists(self, container_name, blob_name):
+ try:
+ props = self.bc.get_blob_properties(container_name, blob_name)
+ return props is not None
+ except:
+ return False
+
+ def _data_disk_exists(self, service_name, deployment_name, role_name, lun):
+ try:
+ props = self.sms.get_data_disk(service_name, deployment_name, role_name, lun)
+ return props is not None
+ except:
+ return False
+
+ def _add_data_disk_from_blob_url(self, service_name, deployment_name, role_name, lun, label):
+ url = self._upload_disk_to_storage_blob('disk')
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, label, None, None, url)
+ self._wait_for_async(result.request_id)
+
+ def _create_vm_linux(self, service_name, deployment_name, role_name, target_container_name, target_blob_name):
+ image_name = LINUX_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + target_container_name + '/' + target_blob_name
+ system = LinuxConfigurationSet('computername', 'unittest', 'u7;9jbp!', True)
+ system.ssh = None
+ os_hd = OSVirtualHardDisk(image_name, media_link, disk_label = target_blob_name)
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('utendpoint', 'tcp', '59913', '3394'))
+
+ self._create_hosted_service(service_name)
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ def _create_vm_windows(self, service_name, deployment_name, role_name, target_container_name, target_blob_name):
+ image_name = WINDOWS_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + target_container_name + '/' + target_blob_name
+ system = WindowsConfigurationSet('computername', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+ os_hd = OSVirtualHardDisk(image_name, media_link, disk_label = target_blob_name)
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('utendpoint', 'tcp', '59913', '3394'))
+
+ self._create_hosted_service(service_name)
+ self._create_service_certificate(service_name, SERVICE_CERT_DATA, 'pfx', SERVICE_CERT_PASSWORD)
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ def _add_role_windows(self, service_name, deployment_name, role_name2):
+ image_name = WINDOWS_IMAGE_NAME
+ target_container_name = 'vhds'
+ target_blob_name = role_name2 + '.vhd'
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + target_container_name + '/' + target_blob_name
+
+ system = WindowsConfigurationSet('computer2', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ result = self.sms.add_role(service_name, deployment_name, role_name2, system, os_hd)
+ self._wait_for_async(result.request_id)
+
+ #--Test cases for storage accounts -----------------------------------
+ def test_list_storage_accounts(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.list_storage_accounts()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ storage = None
+ for temp in result:
+ if temp.service_name == self.storage_account_name:
+ storage = temp
+ break
+
+ self.assertIsNotNone(storage)
+ self.assertIsNotNone(storage.service_name)
+ self.assertIsNone(storage.storage_service_keys)
+ self.assertIsNotNone(storage.storage_service_properties)
+ self.assertIsNotNone(storage.storage_service_properties.affinity_group)
+ self.assertIsNotNone(storage.storage_service_properties.description)
+ self.assertIsNotNone(storage.storage_service_properties.geo_primary_region)
+ self.assertIsNotNone(storage.storage_service_properties.geo_replication_enabled)
+ self.assertIsNotNone(storage.storage_service_properties.geo_secondary_region)
+ self.assertIsNotNone(storage.storage_service_properties.label)
+ self.assertIsNotNone(storage.storage_service_properties.last_geo_failover_time)
+ self.assertIsNotNone(storage.storage_service_properties.location)
+ self.assertIsNotNone(storage.storage_service_properties.status)
+ self.assertIsNotNone(storage.storage_service_properties.status_of_primary)
+ self.assertIsNotNone(storage.storage_service_properties.status_of_secondary)
+ self.assertIsNotNone(storage.storage_service_properties.endpoints)
+ self.assertTrue(len(storage.storage_service_properties.endpoints) > 0)
+ self.assertIsNotNone(storage.extended_properties)
+ self.assertTrue(len(storage.extended_properties) > 0)
+
+ def test_get_storage_account_properties(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.get_storage_account_properties(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.service_name, self.storage_account_name)
+ self.assertIsNotNone(result.url)
+ self.assertIsNone(result.storage_service_keys)
+ self.assertIsNotNone(result.storage_service_properties)
+ self.assertIsNotNone(result.storage_service_properties.affinity_group)
+ self.assertIsNotNone(result.storage_service_properties.description)
+ self.assertIsNotNone(result.storage_service_properties.geo_primary_region)
+ self.assertIsNotNone(result.storage_service_properties.geo_replication_enabled)
+ self.assertIsNotNone(result.storage_service_properties.geo_secondary_region)
+ self.assertIsNotNone(result.storage_service_properties.label)
+ self.assertIsNotNone(result.storage_service_properties.last_geo_failover_time)
+ self.assertIsNotNone(result.storage_service_properties.location)
+ self.assertIsNotNone(result.storage_service_properties.status)
+ self.assertIsNotNone(result.storage_service_properties.status_of_primary)
+ self.assertIsNotNone(result.storage_service_properties.status_of_secondary)
+ self.assertIsNotNone(result.storage_service_properties.endpoints)
+ self.assertTrue(len(result.storage_service_properties.endpoints) > 0)
+ self.assertIsNotNone(result.extended_properties)
+ self.assertTrue(len(result.extended_properties) > 0)
+ self.assertIsNotNone(result.capabilities)
+ self.assertTrue(len(result.capabilities) > 0)
+
+ def test_get_storage_account_keys(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.get_storage_account_keys(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.storage_service_keys.primary)
+ self.assertIsNotNone(result.storage_service_keys.secondary)
+ self.assertIsNone(result.storage_service_properties)
+
+ def test_regenerate_storage_account_keys(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+ previous = self.sms.get_storage_account_keys(self.storage_account_name)
+
+ # Act
+ result = self.sms.regenerate_storage_account_keys(self.storage_account_name, 'Secondary')
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.storage_service_keys.primary)
+ self.assertIsNotNone(result.storage_service_keys.secondary)
+ self.assertIsNone(result.storage_service_properties)
+ self.assertEqual(result.storage_service_keys.primary, previous.storage_service_keys.primary)
+ self.assertNotEqual(result.storage_service_keys.secondary, previous.storage_service_keys.secondary)
+
+ def test_create_storage_account(self):
+ # Arrange
+ description = self.storage_account_name + 'description'
+ label = self.storage_account_name + 'label'
+
+ # Act
+ result = self.sms.create_storage_account(self.storage_account_name, description, label, None, 'West US', True, {'ext1':'val1', 'ext2':42})
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._storage_account_exists(self.storage_account_name))
+
+ def test_update_storage_account(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+ description = self.storage_account_name + 'descriptionupdate'
+ label = self.storage_account_name + 'labelupdate'
+
+ # Act
+ result = self.sms.update_storage_account(self.storage_account_name, description, label, False, {'ext1':'val1update', 'ext2':53, 'ext3':'brandnew'})
+
+ # Assert
+ self.assertIsNone(result)
+ props = self.sms.get_storage_account_properties(self.storage_account_name)
+ self.assertEqual(props.storage_service_properties.description, description)
+ self.assertEqual(props.storage_service_properties.label, label)
+ self.assertEqual(props.extended_properties['ext1'], 'val1update')
+ self.assertEqual(props.extended_properties['ext2'], '53')
+ self.assertEqual(props.extended_properties['ext3'], 'brandnew')
+
+ def test_delete_storage_account(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.delete_storage_account(self.storage_account_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._storage_account_exists(self.storage_account_name))
+
+ def test_check_storage_account_name_availability_not_available(self):
+ # Arrange
+ self._create_storage_account(self.storage_account_name)
+
+ # Act
+ result = self.sms.check_storage_account_name_availability(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertFalse(result.result)
+
+ def test_check_storage_account_name_availability_available(self):
+ # Arrange
+
+ # Act
+ result = self.sms.check_storage_account_name_availability(self.storage_account_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(result.result)
+
+ #--Test cases for hosted services ------------------------------------
+ def test_list_hosted_services(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.list_hosted_services()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ service = None
+ for temp in result:
+ if temp.service_name == self.hosted_service_name:
+ service = temp
+ break
+
+ self.assertIsNotNone(service)
+ self.assertIsNotNone(service.service_name)
+ self.assertIsNotNone(service.url)
+ self.assertIsNotNone(service.hosted_service_properties)
+ self.assertIsNotNone(service.hosted_service_properties.affinity_group)
+ self.assertIsNotNone(service.hosted_service_properties.date_created)
+ self.assertIsNotNone(service.hosted_service_properties.date_last_modified)
+ self.assertIsNotNone(service.hosted_service_properties.description)
+ self.assertIsNotNone(service.hosted_service_properties.label)
+ self.assertIsNotNone(service.hosted_service_properties.location)
+ self.assertIsNotNone(service.hosted_service_properties.status)
+ self.assertIsNotNone(service.hosted_service_properties.extended_properties['ext1'])
+ self.assertIsNotNone(service.hosted_service_properties.extended_properties['ext2'])
+ self.assertIsNone(service.deployments)
+
+ def test_get_hosted_service_properties(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.get_hosted_service_properties(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.hosted_service_properties)
+ self.assertIsNotNone(result.hosted_service_properties.affinity_group)
+ self.assertIsNotNone(result.hosted_service_properties.date_created)
+ self.assertIsNotNone(result.hosted_service_properties.date_last_modified)
+ self.assertIsNotNone(result.hosted_service_properties.description)
+ self.assertIsNotNone(result.hosted_service_properties.label)
+ self.assertIsNotNone(result.hosted_service_properties.location)
+ self.assertIsNotNone(result.hosted_service_properties.status)
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext1'])
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext2'])
+ self.assertIsNone(result.deployments)
+
+ def test_get_hosted_service_properties_with_embed_detail(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.get_hosted_service_properties(self.hosted_service_name, True)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.service_name)
+ self.assertIsNotNone(result.url)
+ self.assertIsNotNone(result.hosted_service_properties)
+ self.assertIsNotNone(result.hosted_service_properties.affinity_group)
+ self.assertIsNotNone(result.hosted_service_properties.date_created)
+ self.assertIsNotNone(result.hosted_service_properties.date_last_modified)
+ self.assertIsNotNone(result.hosted_service_properties.description)
+ self.assertIsNotNone(result.hosted_service_properties.label)
+ self.assertIsNotNone(result.hosted_service_properties.location)
+ self.assertIsNotNone(result.hosted_service_properties.status)
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext1'])
+ self.assertIsNotNone(result.hosted_service_properties.extended_properties['ext2'])
+
+ self.assertIsNotNone(result.deployments)
+ self.assertIsNotNone(result.deployments[0].configuration)
+ self.assertIsNotNone(result.deployments[0].created_time)
+ self.assertIsNotNone(result.deployments[0].deployment_slot)
+ self.assertIsNotNone(result.deployments[0].extended_properties['dep1'])
+ self.assertIsNotNone(result.deployments[0].extended_properties['dep2'])
+ self.assertIsNotNone(result.deployments[0].label)
+ self.assertIsNotNone(result.deployments[0].last_modified_time)
+ self.assertFalse(result.deployments[0].locked)
+ self.assertEqual(result.deployments[0].name, deployment_name)
+ self.assertIsNone(result.deployments[0].persistent_vm_downtime_info)
+ self.assertIsNotNone(result.deployments[0].private_id)
+ self.assertIsNotNone(result.deployments[0].role_list[0].os_version)
+ self.assertEqual(result.deployments[0].role_list[0].role_name, 'WorkerRole1')
+ self.assertFalse(result.deployments[0].rollback_allowed)
+ self.assertIsNotNone(result.deployments[0].sdk_version)
+ self.assertIsNotNone(result.deployments[0].status)
+ self.assertIsNotNone(result.deployments[0].upgrade_domain_count)
+ self.assertIsNone(result.deployments[0].upgrade_status)
+ self.assertIsNotNone(result.deployments[0].url)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].fqdn)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_error_code)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_fault_domain)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_name)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_size)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_state_details)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_status)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].instance_upgrade_domain)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].ip_address)
+ self.assertIsNotNone(result.deployments[0].role_instance_list[0].power_state)
+ self.assertEqual(result.deployments[0].role_instance_list[0].role_name, 'WorkerRole1')
+
+ def test_create_hosted_service(self):
+ # Arrange
+ label = 'pythonlabel'
+ description = 'python hosted service description'
+ location = 'West US'
+
+ # Act
+ result = self.sms.create_hosted_service(self.hosted_service_name, label, description, location, None, {'ext1':'val1','ext2':'val2'})
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._hosted_service_exists(self.hosted_service_name))
+
+ def test_update_hosted_service(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ label = 'ptvslabelupdate'
+ description = 'ptvs description update'
+
+ # Act
+ result = self.sms.update_hosted_service(self.hosted_service_name, label, description, {'ext1':'val1update','ext2':'val2update','ext3':'brandnew'})
+
+ # Assert
+ self.assertIsNone(result)
+ props = self.sms.get_hosted_service_properties(self.hosted_service_name)
+ self.assertEqual(props.hosted_service_properties.label, label)
+ self.assertEqual(props.hosted_service_properties.description, description)
+ self.assertEqual(props.hosted_service_properties.extended_properties['ext1'], 'val1update')
+ self.assertEqual(props.hosted_service_properties.extended_properties['ext2'], 'val2update')
+ self.assertEqual(props.hosted_service_properties.extended_properties['ext3'], 'brandnew')
+
+ def test_delete_hosted_service(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.delete_hosted_service(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._hosted_service_exists(self.hosted_service_name))
+
+ def test_get_deployment_by_slot(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.get_deployment_by_slot(self.hosted_service_name, 'Production')
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.name, deployment_name)
+ self.assertEqual(result.deployment_slot, 'Production')
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.configuration)
+
+ def test_get_deployment_by_name(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.name, deployment_name)
+ self.assertEqual(result.deployment_slot, 'Production')
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.configuration)
+
+ def test_create_deployment(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ configuration = base64.b64encode(DEPLOYMENT_ORIGINAL_CONFIG)
+ package_url = self._upload_default_package_to_storage_blob('WindowsAzure1Blob')
+
+ # Act
+ result = self.sms.create_deployment(self.hosted_service_name, 'production', 'WindowsAzure1', package_url, 'deploylabel', configuration)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._deployment_exists(self.hosted_service_name, 'WindowsAzure1'))
+
+ def test_delete_deployment(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.delete_deployment(self.hosted_service_name, deployment_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._deployment_exists(self.hosted_service_name, deployment_name))
+
+ def test_swap_deployment(self):
+ # Arrange
+ production_deployment_name = 'utdeployprod'
+ staging_deployment_name = 'utdeploystag'
+ self._create_hosted_service(self.hosted_service_name)
+ self._add_deployment(self.hosted_service_name, production_deployment_name, 'Production')
+ self._add_deployment(self.hosted_service_name, staging_deployment_name, 'Staging')
+
+ # Act
+ result = self.sms.swap_deployment(self.hosted_service_name, production_deployment_name, staging_deployment_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ deploy = self.sms.get_deployment_by_slot(self.hosted_service_name, 'Production')
+ self.assertIsNotNone(deploy)
+ self.assertEqual(deploy.name, staging_deployment_name)
+ self.assertEqual(deploy.deployment_slot, 'Production')
+
+ deploy = self.sms.get_deployment_by_slot(self.hosted_service_name, 'Staging')
+ self.assertIsNotNone(deploy)
+ self.assertEqual(deploy.name, production_deployment_name)
+ self.assertEqual(deploy.deployment_slot, 'Staging')
+
+ def test_change_deployment_configuration(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+
+ # Act
+ result = self.sms.change_deployment_configuration(self.hosted_service_name, deployment_name, configuration)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertTrue(props.configuration.find('Instances count="4"') >= 0)
+
+ def test_update_deployment_status(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.update_deployment_status(self.hosted_service_name, deployment_name, 'Suspended')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertEqual(props.status, 'Suspended')
+
+ def test_upgrade_deployment(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ package_url = self._upload_default_package_to_storage_blob('updated')
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+
+ # Act
+ result = self.sms.upgrade_deployment(self.hosted_service_name, deployment_name, 'Auto', package_url, configuration, 'upgraded', True)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertEqual(props.label, 'upgraded')
+ self.assertTrue(props.configuration.find('Instances count="4"') >= 0)
+
+ def test_walk_upgrade_domain(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ package_url = self._upload_default_package_to_storage_blob('updated')
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+ result = self.sms.upgrade_deployment(self.hosted_service_name, deployment_name, 'Manual', package_url, configuration, 'upgraded', True)
+ self._wait_for_async(result.request_id)
+
+ # Act
+ result = self.sms.walk_upgrade_domain(self.hosted_service_name, deployment_name, 0)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertEqual(props.label, 'upgraded')
+ self.assertTrue(props.configuration.find('Instances count="4"') >= 0)
+
+ def test_rollback_update_or_upgrade(self):
+ # Arrange
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ package_url = self._upload_default_package_to_storage_blob('updated207')
+ configuration = base64.b64encode(DEPLOYMENT_UPDATE_CONFIG)
+
+ self.sms.upgrade_deployment(self.hosted_service_name, deployment_name, 'Auto', package_url, configuration, 'upgraded', True)
+ self._wait_for_rollback_allowed(self.hosted_service_name, deployment_name)
+
+ # Act
+ result = self.sms.rollback_update_or_upgrade(self.hosted_service_name, deployment_name, 'Auto', True)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ self.assertTrue(props.configuration.find('Instances count="2"') >= 0)
+
+ def test_reboot_role_instance(self):
+ # Arrange
+ role_instance_name = 'WorkerRole1_IN_0'
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ result = self.sms.update_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_role_instance_status(self.hosted_service_name, deployment_name, role_instance_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.reboot_role_instance(self.hosted_service_name, deployment_name, role_instance_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ status = self._get_role_instance_status(props, role_instance_name)
+ self.assertTrue(status == 'StoppedVM' or status =='ReadyRole')
+
+ def test_reimage_role_instance(self):
+ # Arrange
+ role_instance_name = 'WorkerRole1_IN_0'
+ deployment_name = 'utdeployment'
+ self._create_hosted_service_with_deployment(self.hosted_service_name, deployment_name)
+ result = self.sms.update_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(self.hosted_service_name, deployment_name, 'Running')
+ self._wait_for_role_instance_status(self.hosted_service_name, deployment_name, role_instance_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.reimage_role_instance(self.hosted_service_name, deployment_name, role_instance_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ props = self.sms.get_deployment_by_name(self.hosted_service_name, deployment_name)
+ status = self._get_role_instance_status(props, role_instance_name)
+ self.assertTrue(status == 'StoppedVM' or status =='ReadyRole')
+
+ def test_check_hosted_service_name_availability_not_available(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.check_hosted_service_name_availability(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertFalse(result.result)
+
+ def test_check_hosted_service_name_availability_available(self):
+ # Arrange
+
+ # Act
+ result = self.sms.check_hosted_service_name_availability(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(result.result)
+
+ #--Test cases for service certificates -------------------------------
+ def test_list_service_certificates(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ self._create_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+
+ # Act
+ result = self.sms.list_service_certificates(self.hosted_service_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ url_part = '/' + self.hosted_service_name + '/'
+ cert = None
+ for temp in result:
+ if url_part in temp.certificate_url:
+ cert = temp
+ break
+
+ self.assertIsNotNone(cert)
+ self.assertIsNotNone(cert.certificate_url)
+ self.assertEqual(cert.thumbprint, SERVICE_CERT_THUMBPRINT)
+ self.assertEqual(cert.thumbprint_algorithm, SERVICE_CERT_THUMBALGO)
+ self.assertEqual(cert.data, SERVICE_CERT_DATA_PUBLIC)
+
+ def test_get_service_certificate(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ self._create_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+
+ # Act
+ result = self.sms.get_service_certificate(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.certificate_url, '')
+ self.assertEqual(result.thumbprint, '')
+ self.assertEqual(result.thumbprint_algorithm, '')
+ self.assertEqual(result.data, SERVICE_CERT_DATA_PUBLIC)
+
+ def test_add_service_certificate(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+
+ # Act
+ result = self.sms.add_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._service_certificate_exists(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT))
+
+ def test_delete_service_certificate(self):
+ # Arrange
+ self._create_hosted_service(self.hosted_service_name)
+ self._create_service_certificate(self.hosted_service_name, SERVICE_CERT_DATA, SERVICE_CERT_FORMAT, SERVICE_CERT_PASSWORD)
+
+ # Act
+ result = self.sms.delete_service_certificate(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._service_certificate_exists(self.hosted_service_name, SERVICE_CERT_THUMBALGO, SERVICE_CERT_THUMBPRINT))
+
+ #--Test cases for management certificates ----------------------------
+ def test_list_management_certificates(self):
+ # Arrange
+ self._create_management_certificate(self.management_certificate_name)
+
+ # Act
+ result = self.sms.list_management_certificates()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ cert = None
+ for temp in result:
+ if temp.subscription_certificate_thumbprint == self.management_certificate_name:
+ cert = temp
+ break
+
+ self.assertIsNotNone(cert)
+ self.assertIsNotNone(cert.created)
+ self.assertEqual(cert.subscription_certificate_public_key, MANAGEMENT_CERT_PUBLICKEY)
+ self.assertEqual(cert.subscription_certificate_data, MANAGEMENT_CERT_DATA)
+ self.assertEqual(cert.subscription_certificate_thumbprint, self.management_certificate_name)
+
+ def test_get_management_certificate(self):
+ # Arrange
+ self._create_management_certificate(self.management_certificate_name)
+
+ # Act
+ result = self.sms.get_management_certificate(self.management_certificate_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.created)
+ self.assertEqual(result.subscription_certificate_public_key, MANAGEMENT_CERT_PUBLICKEY)
+ self.assertEqual(result.subscription_certificate_data, MANAGEMENT_CERT_DATA)
+ self.assertEqual(result.subscription_certificate_thumbprint, self.management_certificate_name)
+
+ def test_add_management_certificate(self):
+ # Arrange
+ public_key = MANAGEMENT_CERT_PUBLICKEY
+ data = MANAGEMENT_CERT_DATA
+
+ # Act
+ result = self.sms.add_management_certificate(public_key, self.management_certificate_name, data)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._management_certificate_exists(self.management_certificate_name))
+
+ def test_delete_management_certificate(self):
+ # Arrange
+ self._create_management_certificate(self.management_certificate_name)
+
+ # Act
+ result = self.sms.delete_management_certificate(self.management_certificate_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._management_certificate_exists(self.management_certificate_name))
+
+ #--Test cases for affinity groups ------------------------------------
+ def test_list_affinity_groups(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+
+ # Act
+ result = self.sms.list_affinity_groups()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ group = None
+ for temp in result:
+ if temp.name == self.affinity_group_name:
+ group = temp
+ break
+
+ self.assertIsNotNone(group)
+ self.assertIsNotNone(group.name)
+ self.assertIsNotNone(group.label)
+ self.assertIsNotNone(group.description)
+ self.assertIsNotNone(group.location)
+ self.assertIsNotNone(group.capabilities)
+ self.assertTrue(len(group.capabilities) > 0)
+
+ def test_get_affinity_group_properties(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+ self.sms.create_hosted_service(self.hosted_service_name, 'affgrptestlabel', 'affgrptestdesc', None, self.affinity_group_name)
+ self.sms.create_storage_account(self.storage_account_name, self.storage_account_name + 'desc', self.storage_account_name + 'label', self.affinity_group_name)
+
+ # Act
+ result = self.sms.get_affinity_group_properties(self.affinity_group_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.name, self.affinity_group_name)
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.description)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.hosted_services[0])
+ self.assertEqual(result.hosted_services[0].service_name, self.hosted_service_name)
+ self.assertEqual(result.hosted_services[0].hosted_service_properties.affinity_group, self.affinity_group_name)
+ # NOTE(review): Azure does not appear to include the storage account in this response, so only hosted services are asserted here — confirm against the service API.
+ self.assertTrue(len(result.capabilities) > 0)
+
+ def test_create_affinity_group(self):
+ # Arrange
+ label = 'tstmgmtaffgrp'
+ description = 'tstmgmt affinity group'
+
+ # Act
+ result = self.sms.create_affinity_group(self.affinity_group_name, label, 'West US', description)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._affinity_group_exists(self.affinity_group_name))
+
+ def test_update_affinity_group(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+ label = 'tstlabelupdate'
+ description = 'testmgmt affinity group update'
+
+ # Act
+ result = self.sms.update_affinity_group(self.affinity_group_name, label, description)
+
+ # Assert
+ self.assertIsNone(result)
+ props = self.sms.get_affinity_group_properties(self.affinity_group_name)
+ self.assertEqual(props.label, label)
+ self.assertEqual(props.description, description)
+
+ def test_delete_affinity_group(self):
+ # Arrange
+ self._create_affinity_group(self.affinity_group_name)
+
+ # Act
+ result = self.sms.delete_affinity_group(self.affinity_group_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._affinity_group_exists(self.affinity_group_name))
+
+ #--Test cases for locations ------------------------------------------
+ def test_list_locations(self):
+ # Arrange
+
+ # Act
+ result = self.sms.list_locations()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+ self.assertIsNotNone(result[0].name)
+ self.assertIsNotNone(result[0].display_name)
+ self.assertIsNotNone(result[0].available_services)
+ self.assertTrue(len(result[0].available_services) > 0)
+
+ #--Test cases for retrieving operating system information ------------
+ def test_list_operating_systems(self):
+ # Arrange
+
+ # Act
+ result = self.sms.list_operating_systems()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 20)
+ self.assertIsNotNone(result[0].family)
+ self.assertIsNotNone(result[0].family_label)
+ self.assertIsNotNone(result[0].is_active)
+ self.assertIsNotNone(result[0].is_default)
+ self.assertIsNotNone(result[0].label)
+ self.assertIsNotNone(result[0].version)
+
+ def test_list_operating_system_families(self):
+ # Arrange
+
+ # Act
+ result = self.sms.list_operating_system_families()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+ self.assertIsNotNone(result[0].name)
+ self.assertIsNotNone(result[0].label)
+ self.assertTrue(len(result[0].operating_systems) > 0)
+ self.assertIsNotNone(result[0].operating_systems[0].version)
+ self.assertIsNotNone(result[0].operating_systems[0].label)
+ self.assertIsNotNone(result[0].operating_systems[0].is_default)
+ self.assertIsNotNone(result[0].operating_systems[0].is_active)
+
+ #--Test cases for retrieving subscription history --------------------
+ def test_get_subscription(self):
+ # Arrange
+
+ # Act
+ result = self.sms.get_subscription()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.subscription_id, credentials.getSubscriptionId())
+ self.assertIsNotNone(result.account_admin_live_email_id)
+ self.assertIsNotNone(result.service_admin_live_email_id)
+ self.assertIsNotNone(result.subscription_name)
+ self.assertIsNotNone(result.subscription_status)
+ self.assertTrue(result.current_core_count >= 0)
+ self.assertTrue(result.current_hosted_services >= 0)
+ self.assertTrue(result.current_storage_accounts >= 0)
+ self.assertTrue(result.max_core_count > 0)
+ self.assertTrue(result.max_dns_servers > 0)
+ self.assertTrue(result.max_hosted_services > 0)
+ self.assertTrue(result.max_local_network_sites > 0)
+ self.assertTrue(result.max_storage_accounts > 0)
+ self.assertTrue(result.max_virtual_network_sites > 0)
+
+ #--Test cases for virtual machines -----------------------------------
+ def test_get_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, self.container_name, role_name + '.vhd')
+
+ # Act
+ result = self.sms.get_role(service_name, deployment_name, role_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.role_name, role_name)
+ self.assertIsNotNone(result.role_size)
+ self.assertIsNotNone(result.role_type)
+ self.assertIsNotNone(result.os_virtual_hard_disk)
+ self.assertIsNotNone(result.os_virtual_hard_disk.disk_label)
+ self.assertIsNotNone(result.os_virtual_hard_disk.disk_name)
+ self.assertIsNotNone(result.os_virtual_hard_disk.host_caching)
+ self.assertIsNotNone(result.os_virtual_hard_disk.media_link)
+ self.assertIsNotNone(result.os_virtual_hard_disk.os)
+ self.assertIsNotNone(result.os_virtual_hard_disk.source_image_name)
+ self.assertIsNotNone(result.data_virtual_hard_disks)
+ self.assertIsNotNone(result.configuration_sets)
+ self.assertIsNotNone(result.configuration_sets[0])
+ self.assertIsNotNone(result.configuration_sets[0].configuration_set_type)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].protocol)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].port)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].name)
+ self.assertIsNotNone(result.configuration_sets[0].input_endpoints[0].local_port)
+
+ def test_create_virtual_machine_deployment_linux(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+ image_name = LINUX_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name + '.vhd'
+
+ self._create_hosted_service(service_name)
+
+ # Act
+ system = LinuxConfigurationSet('unittest', 'unittest', 'u7;9jbp!', True)
+ system.ssh = None
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('endpnameL', 'tcp', '59913', '3394'))
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name))
+
+ def test_create_virtual_machine_deployment_windows(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+ image_name = WINDOWS_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name + '.vhd'
+
+ self._create_hosted_service(service_name)
+ self._create_service_certificate(service_name, SERVICE_CERT_DATA, 'pfx', SERVICE_CERT_PASSWORD)
+
+ # Act
+ system = WindowsConfigurationSet('unittest', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('endpnameW', 'tcp', '59917', '3395'))
+
+ result = self.sms.create_virtual_machine_deployment(service_name, deployment_name, 'staging', deployment_name + 'label', role_name, system, os_hd, network_config=network, role_size='Small')
+ self._wait_for_async(result.request_id)
+ self._wait_for_deployment_status(service_name, deployment_name, 'Running')
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name))
+
+ def test_add_role_linux(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name1 = self.hosted_service_name + 'a'
+ role_name2 = self.hosted_service_name + 'b'
+
+ self._create_vm_linux(service_name, deployment_name, role_name1, self.container_name, role_name1 + '.vhd')
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name1, 'ReadyRole')
+
+ image_name = LINUX_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name2 + '.vhd'
+
+ # Act
+ system = LinuxConfigurationSet('computer2', 'unittest', 'u7;9jbp!', True)
+ system.ssh = None
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ result = self.sms.add_role(service_name, deployment_name, role_name2, system, os_hd)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name1))
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name2))
+
+ def test_add_role_windows(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name1 = self.hosted_service_name + 'a'
+ role_name2 = self.hosted_service_name + 'b'
+
+ self._create_vm_windows(service_name, deployment_name, role_name1, self.container_name, role_name1 + '.vhd')
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name1, 'ReadyRole')
+
+ image_name = WINDOWS_IMAGE_NAME
+ media_link = 'http://' + credentials.getStorageServicesName() + '.blob.core.windows.net/' + self.container_name + '/' + role_name2 + '.vhd'
+
+ # Act
+ system = WindowsConfigurationSet('computer2', 'u7;9jbp!', False, False, 'Pacific Standard Time')
+ system.domain_join = None
+ system.stored_certificate_settings.stored_certificate_settings.append(CertificateSetting(SERVICE_CERT_THUMBPRINT, 'My', 'LocalMachine'))
+
+ os_hd = OSVirtualHardDisk(image_name, media_link)
+
+ result = self.sms.add_role(service_name, deployment_name, role_name2, system, os_hd)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name1))
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name2))
+
+ def test_update_role(self):
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ network = ConfigurationSet()
+ network.configuration_set_type = 'NetworkConfiguration'
+ network.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint('endupdate', 'tcp', '50055', '5555'))
+
+ # Act
+ result = self.sms.update_role(service_name, deployment_name, role_name, network_config=network, role_size='Medium')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ role = self.sms.get_role(service_name, deployment_name, role_name)
+ self.assertEqual(role.role_size, 'Medium')
+
+ def test_delete_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name1 = self.hosted_service_name + 'a'
+ role_name2 = self.hosted_service_name + 'b'
+
+ self._create_vm_windows(service_name, deployment_name, role_name1, 'vhds', role_name1)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name1, 'ReadyRole')
+
+ self._add_role_windows(service_name, deployment_name, role_name2)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name2, 'ReadyRole')
+
+ # Act
+ result = self.sms.delete_role(service_name, deployment_name, role_name2)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._role_exists(service_name, deployment_name, role_name1))
+ self.assertFalse(self._role_exists(service_name, deployment_name, role_name2))
+
+ def test_shutdown_start_and_restart_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.shutdown_role(service_name, deployment_name, role_name)
+ self._wait_for_async(result.request_id)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'StoppedVM')
+
+ # Act
+ result = self.sms.start_role(service_name, deployment_name, role_name)
+ self._wait_for_async(result.request_id)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ # Act
+ result = self.sms.restart_role(service_name, deployment_name, role_name)
+ self._wait_for_async(result.request_id)
+ self._wait_for_role_instance_status(service_name, deployment_name, role_name, 'ReadyRole')
+
+ def test_capture_role(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ image_name = self.os_image_name
+ image_label = role_name + 'captured'
+
+ # Act
+ result = self.sms.capture_role(service_name, deployment_name, role_name, 'Delete', image_name, image_label)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._os_image_exists(self.os_image_name))
+
+ #--Test cases for virtual machine images -----------------------------
+ def test_list_os_images(self):
+ # Arrange
+ media_url = LINUX_OS_VHD_URL
+ os = 'Linux'
+ self._create_os_image(self.os_image_name, media_url, os)
+
+ # Act
+ result = self.sms.list_os_images()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ image = None
+ for temp in result:
+ if temp.name == self.os_image_name:
+ image = temp
+ break
+
+ self.assertIsNotNone(image)
+ self.assertIsNotNone(image.affinity_group)
+ self.assertIsNotNone(image.category)
+ self.assertIsNotNone(image.description)
+ self.assertIsNotNone(image.eula)
+ self.assertIsNotNone(image.label)
+ self.assertIsNotNone(image.location)
+ self.assertIsNotNone(image.logical_size_in_gb)
+ self.assertEqual(image.media_link, media_url)
+ self.assertEqual(image.name, self.os_image_name)
+ self.assertEqual(image.os, os)
+
+ def test_get_os_image(self):
+ # Arrange
+ media_url = LINUX_OS_VHD_URL
+ os = 'Linux'
+ self._create_os_image(self.os_image_name, media_url, os)
+
+ # Act
+ result = self.sms.get_os_image(self.os_image_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.affinity_group)
+ self.assertIsNotNone(result.category)
+ self.assertIsNotNone(result.description)
+ self.assertIsNotNone(result.eula)
+ self.assertIsNotNone(result.label)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.logical_size_in_gb)
+ self.assertEqual(result.media_link, media_url)
+ self.assertEqual(result.name, self.os_image_name)
+ self.assertEqual(result.os, os)
+
+ def test_add_os_image(self):
+ # Arrange
+
+ # Act
+ result = self.sms.add_os_image('utcentosimg', LINUX_OS_VHD_URL, self.os_image_name, 'Linux')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._os_image_exists(self.os_image_name))
+
+ def test_update_os_image(self):
+ # Arrange
+ self._create_os_image(self.os_image_name, LINUX_OS_VHD_URL, 'Linux')
+
+ # Act
+ result = self.sms.update_os_image(self.os_image_name, 'newlabel', LINUX_OS_VHD_URL, self.os_image_name, 'Linux')
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ image = self.sms.get_os_image(self.os_image_name)
+ self.assertEqual(image.label, 'newlabel')
+ self.assertEqual(image.os, 'Linux')
+
+ def test_delete_os_image(self):
+ # Arrange
+ self._create_os_image(self.os_image_name, LINUX_OS_VHD_URL, 'Linux')
+
+ # Act
+ result = self.sms.delete_os_image(self.os_image_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._os_image_exists(self.os_image_name))
+
+    #--Test cases for virtual machine data disks -------------------------
+ def test_get_data_disk(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 1
+ self._add_data_disk_from_blob_url(service_name, deployment_name, role_name, lun, 'mylabel')
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.get_data_disk(service_name, deployment_name, role_name, lun)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertEqual(result.disk_label, 'mylabel')
+ self.assertIsNotNone(result.disk_name)
+ self.assertIsNotNone(result.host_caching)
+ self.assertIsNotNone(result.logical_disk_size_in_gb)
+ self.assertEqual(result.lun, lun)
+ self.assertIsNotNone(result.media_link)
+
+ def test_add_data_disk_from_disk_name(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 2
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, 'testdisklabel', self.disk_name)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+
+ def test_add_data_disk_from_blob_url(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 3
+ label = 'disk' + str(lun)
+ url = self._upload_disk_to_storage_blob('disk')
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, label, None, None, url)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertTrue(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+
+ def test_update_data_disk(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 1
+ updated_lun = 10
+ self._add_data_disk_from_blob_url(service_name, deployment_name, role_name, lun, 'mylabel')
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+
+ # Act
+ result = self.sms.update_data_disk(service_name, deployment_name, role_name, lun, None, None, updated_lun)
+ self._wait_for_async(result.request_id)
+ self.data_disk_info = (service_name, deployment_name, role_name, updated_lun)
+
+ # Assert
+ self.assertFalse(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+ self.assertTrue(self._data_disk_exists(service_name, deployment_name, role_name, updated_lun))
+
+ def test_delete_data_disk(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 5
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, 'testdisklabel', self.disk_name)
+ self._wait_for_async(result.request_id)
+
+ # Act
+ result = self.sms.delete_data_disk(service_name, deployment_name, role_name, lun)
+ self._wait_for_async(result.request_id)
+
+ # Assert
+ self.assertFalse(self._data_disk_exists(service_name, deployment_name, role_name, lun))
+
+ #--Test cases for virtual machine disks ------------------------------
+ def test_list_disks(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.list_disks()
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertTrue(len(result) > 0)
+
+ disk = None
+ for temp in result:
+ if temp.name == self.disk_name:
+ disk = temp
+ break
+
+ self.assertIsNotNone(disk)
+ self.assertIsNotNone(disk.os)
+ self.assertIsNotNone(disk.location)
+ self.assertIsNotNone(disk.logical_disk_size_in_gb)
+ self.assertIsNotNone(disk.media_link)
+ self.assertIsNotNone(disk.name)
+ self.assertIsNotNone(disk.source_image_name)
+
+ def test_get_disk_unattached(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.get_disk(self.disk_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.os)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.logical_disk_size_in_gb)
+ self.assertEqual(result.media_link, url)
+ self.assertEqual(result.name, self.disk_name)
+ self.assertIsNotNone(result.source_image_name)
+ self.assertIsNone(result.attached_to)
+
+ def test_get_disk_attached(self):
+ # Arrange
+ service_name = self.hosted_service_name
+ deployment_name = self.hosted_service_name
+ role_name = self.hosted_service_name
+
+ self._create_vm_windows(service_name, deployment_name, role_name, 'vhds', self.hosted_service_name)
+
+ lun = 6
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+ self.data_disk_info = (service_name, deployment_name, role_name, lun)
+ result = self.sms.add_data_disk(service_name, deployment_name, role_name, lun, None, None, 'testdisklabel', self.disk_name)
+ self._wait_for_async(result.request_id)
+
+ # Act
+ result = self.sms.get_disk(self.disk_name)
+
+ # Assert
+ self.assertIsNotNone(result)
+ self.assertIsNotNone(result.os)
+ self.assertIsNotNone(result.location)
+ self.assertIsNotNone(result.logical_disk_size_in_gb)
+ self.assertIsNotNone(result.media_link)
+ self.assertIsNotNone(result.name)
+ self.assertIsNotNone(result.source_image_name)
+ self.assertIsNotNone(result.attached_to)
+ self.assertEqual(result.attached_to.deployment_name, deployment_name)
+ self.assertEqual(result.attached_to.hosted_service_name, service_name)
+ self.assertEqual(result.attached_to.role_name, role_name)
+
+ def test_add_disk(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+
+ # Act
+ result = self.sms.add_disk(False, 'ptvslabel', url, self.disk_name, 'Windows')
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertTrue(self._disk_exists(self.disk_name))
+
+ def test_update_disk(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ urlupdate = self._upload_disk_to_storage_blob('diskupdate')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.update_disk(self.disk_name, False, 'ptvslabelupdate', urlupdate, self.disk_name, 'Windows')
+
+ # Assert
+ self.assertIsNone(result)
+ disk = self.sms.get_disk(self.disk_name)
+ self.assertEqual(disk.name, self.disk_name)
+ self.assertEqual(disk.label, 'ptvslabelupdate')
+ self.assertEqual(disk.media_link, url)
+
+ def test_delete_disk(self):
+ # Arrange
+ url = self._upload_disk_to_storage_blob('disk')
+ self._create_disk(self.disk_name, 'Windows', url)
+
+ # Act
+ result = self.sms.delete_disk(self.disk_name)
+
+ # Assert
+ self.assertIsNone(result)
+ self.assertFalse(self._disk_exists(self.disk_name))
+
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/azuretest/test_sharedaccesssignature.py b/test/azuretest/test_sharedaccesssignature.py
new file mode 100644
index 000000000000..76c40cf7e900
--- /dev/null
+++ b/test/azuretest/test_sharedaccesssignature.py
@@ -0,0 +1,137 @@
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
+
+from azure import DEV_ACCOUNT_NAME, DEV_ACCOUNT_KEY
+from azure.storage.sharedaccesssignature import (SharedAccessSignature,
+ SharedAccessPolicy,
+ Permission,
+ WebResource)
+from azure.storage import AccessPolicy
+from azuretest.util import AzureTestCase
+
+import unittest
+
+#------------------------------------------------------------------------------
+SIGNED_START = 'st'
+SIGNED_EXPIRY = 'se'
+SIGNED_RESOURCE = 'sr'
+SIGNED_PERMISSION = 'sp'
+SIGNED_IDENTIFIER = 'si'
+SIGNED_SIGNATURE = 'sig'
+RESOURCE_BLOB = 'blob'
+RESOURCE_CONTAINER = 'container'
+SIGNED_RESOURCE_TYPE = 'resource'
+SHARED_ACCESS_PERMISSION = 'permission'
+
+#------------------------------------------------------------------------------
+class SharedAccessSignatureTest(AzureTestCase):
+
+ def setUp(self):
+ self.sas = SharedAccessSignature(account_name=DEV_ACCOUNT_NAME,
+ account_key=DEV_ACCOUNT_KEY)
+ def tearDown(self):
+ return super(SharedAccessSignatureTest, self).tearDown()
+
+ def test_generate_signature_container(self):
+ accss_plcy = AccessPolicy()
+ accss_plcy.start = '2011-10-11'
+ accss_plcy.expiry = '2011-10-12'
+ accss_plcy.permission = 'r'
+ signed_identifier = 'YWJjZGVmZw=='
+ sap = SharedAccessPolicy(accss_plcy, signed_identifier)
+ signature = self.sas._generate_signature('images',
+ RESOURCE_CONTAINER,
+ sap)
+ self.assertEqual(signature,
+ 'VdlALM4TYEYYNf94Bvt3dn48TsA01wk45ltwP3zeKp4=')
+
+ def test_generate_signature_blob(self):
+ accss_plcy = AccessPolicy()
+ accss_plcy.start = '2011-10-11T11:03:40Z'
+ accss_plcy.expiry = '2011-10-12T11:53:40Z'
+ accss_plcy.permission = 'r'
+ sap = SharedAccessPolicy(accss_plcy)
+
+ signature = self.sas._generate_signature('images/pic1.png',
+ RESOURCE_BLOB,
+ sap)
+ self.assertEqual(signature,
+ '7NIEip+VOrQ5ZV80pORPK1MOsJc62wwCNcbMvE+lQ0s=')
+
+ def test_blob_signed_query_string(self):
+ accss_plcy = AccessPolicy()
+ accss_plcy.start = '2011-10-11'
+ accss_plcy.expiry = '2011-10-12'
+ accss_plcy.permission = 'w'
+ sap = SharedAccessPolicy(accss_plcy)
+ qry_str = self.sas.generate_signed_query_string('images/pic1.png',
+ RESOURCE_BLOB,
+ sap)
+ self.assertEqual(qry_str[SIGNED_START], '2011-10-11')
+ self.assertEqual(qry_str[SIGNED_EXPIRY], '2011-10-12')
+ self.assertEqual(qry_str[SIGNED_RESOURCE], RESOURCE_BLOB)
+ self.assertEqual(qry_str[SIGNED_PERMISSION], 'w')
+ self.assertEqual(qry_str[SIGNED_SIGNATURE],
+ 'k8uyTrn3pgLXuhwgZhxeAH6mZ/es9k2vqHPJEuIH4CE=')
+
+ def test_container_signed_query_string(self):
+ accss_plcy = AccessPolicy()
+ accss_plcy.start = '2011-10-11'
+ accss_plcy.expiry = '2011-10-12'
+ accss_plcy.permission = 'r'
+ signed_identifier = 'YWJjZGVmZw=='
+ sap = SharedAccessPolicy(accss_plcy, signed_identifier)
+ qry_str = self.sas.generate_signed_query_string('images',
+ RESOURCE_CONTAINER,
+ sap)
+ self.assertEqual(qry_str[SIGNED_START], '2011-10-11')
+ self.assertEqual(qry_str[SIGNED_EXPIRY], '2011-10-12')
+ self.assertEqual(qry_str[SIGNED_RESOURCE], RESOURCE_CONTAINER)
+ self.assertEqual(qry_str[SIGNED_PERMISSION], 'r')
+ self.assertEqual(qry_str[SIGNED_IDENTIFIER], 'YWJjZGVmZw==')
+ self.assertEqual(qry_str[SIGNED_SIGNATURE],
+ 'VdlALM4TYEYYNf94Bvt3dn48TsA01wk45ltwP3zeKp4=')
+
+ def test_sign_request(self):
+ accss_plcy = AccessPolicy()
+ accss_plcy.start = '2011-10-11'
+ accss_plcy.expiry = '2011-10-12'
+ accss_plcy.permission = 'r'
+ sap = SharedAccessPolicy(accss_plcy)
+ qry_str = self.sas.generate_signed_query_string('images/pic1.png',
+ RESOURCE_BLOB,
+ sap)
+
+ permission = Permission()
+ permission.path = '/images/pic1.png'
+ permission.query_string = qry_str
+ self.sas.permission_set = [permission]
+
+ web_rsrc = WebResource()
+ web_rsrc.properties[SIGNED_RESOURCE_TYPE] = RESOURCE_BLOB
+ web_rsrc.properties[SHARED_ACCESS_PERMISSION] = 'r'
+ web_rsrc.path = '/images/pic1.png?comp=metadata'
+ web_rsrc.request_url = '/images/pic1.png?comp=metadata'
+
+ web_rsrc = self.sas.sign_request(web_rsrc)
+
+ self.assertEqual(web_rsrc.request_url,
+ '/images/pic1.png?comp=metadata&' +
+ self.sas._convert_query_string(qry_str))
+
+#------------------------------------------------------------------------------
+if __name__ == '__main__':
+ unittest.main()
+
\ No newline at end of file
diff --git a/test/azuretest/test_tableservice.py b/test/azuretest/test_tableservice.py
index 4d38a76936e2..49be76c057ff 100644
--- a/test/azuretest/test_tableservice.py
+++ b/test/azuretest/test_tableservice.py
@@ -1,5 +1,5 @@
-#-------------------------------------------------------------------------
-# Copyright 2011 Microsoft Corporation
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,83 +17,187 @@
from azure.storage import EntityProperty, Entity, StorageServiceProperties
from azure import WindowsAzureError
-
from azuretest.util import (credentials,
getUniqueTestRunID,
STATUS_OK,
STATUS_CREATED,
STATUS_ACCEPTED,
STATUS_NO_CONTENT,
- getUniqueNameBasedOnCurrentTime)
+ getUniqueNameBasedOnCurrentTime,
+ AzureTestCase)
import unittest
import time
from datetime import datetime
#------------------------------------------------------------------------------
-__uid = getUniqueTestRunID()
-
-TABLE_TO_DELETE = 'mytesttabletodelete%s' % (__uid)
-TABLE_NO_DELETE = 'mytesttablenodelete%s' % (__uid)
-ENTITY_TO_DELETE = 'mytestentitytodelete%s' % (__uid)
-ENTITY_NO_DELETE = 'mytestentitynodelete%s' % (__uid)
-BATCH_TABLE = 'mytestbatchtable%s' % (__uid)
-FILTER_TABLE = 'mytestfiltertable%s' % (__uid)
+
+MAX_RETRY = 60
#------------------------------------------------------------------------------
-class StorageTest(unittest.TestCase):
- '''
- TODO:
- - comprehensive, positive test cases for all table client methods
- - comprehensive, negative test cases all table client methods
- - missing coverage for begin_batch
- - missing coverage for cancel_batch
- - missing coverage for commit_batch
- - get_table_service_properties busted
- - set_table_service_properties busted
- '''
+class StorageTest(AzureTestCase):
def setUp(self):
- self.tc = TableService(account_name=credentials.getStorageServicesName().encode('ascii', 'ignore'),
- account_key=credentials.getStorageServicesKey().encode('ascii', 'ignore'))
+ self.tc = TableService(account_name=credentials.getStorageServicesName(),
+ account_key=credentials.getStorageServicesKey())
- __uid = getUniqueTestRunID()
- test_table_base_name = u'testtable%s' % (__uid)
- self.test_table = getUniqueNameBasedOnCurrentTime(test_table_base_name)
- self.tc.create_table(self.test_table)
+ proxy_host = credentials.getProxyHost()
+ proxy_port = credentials.getProxyPort()
+ if proxy_host:
+ self.tc.set_proxy(proxy_host, proxy_port)
- #time.sleep(10)
+ __uid = getUniqueTestRunID()
+ table_base_name = u'testtable%s' % (__uid)
+ self.table_name = getUniqueNameBasedOnCurrentTime(table_base_name)
+ self.additional_table_names = []
def tearDown(self):
self.cleanup()
return super(StorageTest, self).tearDown()
def cleanup(self):
- for cont in [TABLE_NO_DELETE, TABLE_TO_DELETE]:
- try: self.tc.delete_table(cont)
+ try:
+ self.tc.delete_table(self.table_name)
+ except: pass
+
+ for name in self.additional_table_names:
+ try:
+ self.tc.delete_table(name)
except: pass
- self.tc.delete_table(self.test_table)
- def test_sanity(self):
- self.sanity_create_table()
- time.sleep(10)
- self.sanity_query_tables()
-
- self.sanity_delete_table()
-
- self.sanity_insert_entity()
- self.sanity_get_entity()
- self.sanity_query_entities()
- self.sanity_update_entity()
- self.sanity_insert_or_merge_entity()
- self.sanity_insert_or_replace_entity()
- self.sanity_merge_entity()
- self.sanity_delete_entity()
-
- self.sanity_begin_batch()
- self.sanity_commit_batch()
- self.sanity_cancel_batch()
-
- def test_sanity_get_set_table_service_properties(self):
+ #--Helpers-----------------------------------------------------------------
+ def _create_table(self, table_name):
+ '''
+ Creates a table with the specified name.
+ '''
+ self.tc.create_table(table_name, True)
+
+ def _create_table_with_default_entities(self, table_name, entity_count):
+ '''
+ Creates a table with the specified name and adds entities with the
+ default set of values. PartitionKey is set to 'MyPartition' and RowKey
+ is set to a unique counter value starting at 1 (as a string).
+ '''
+ entities = []
+ self._create_table(table_name)
+ for i in range(1, entity_count + 1):
+ entities.append(self.tc.insert_entity(table_name, self._create_default_entity_dict('MyPartition', str(i))))
+ return entities
+
+ def _create_default_entity_class(self, partition, row):
+ '''
+ Creates a class-based entity with fixed values, using all
+ of the supported data types.
+ '''
+ # TODO: Edm.Binary and null
+ entity = Entity()
+ entity.PartitionKey = partition
+ entity.RowKey = row
+ entity.age = 39
+ entity.sex = 'male'
+ entity.married = True
+ entity.deceased = False
+ entity.optional = None
+ entity.ratio = 3.1
+ entity.large = 9333111000
+ entity.Birthday = datetime(1973,10,04)
+ entity.birthday = datetime(1970,10,04)
+ entity.binary = None
+ entity.other = EntityProperty('Edm.Int64', 20)
+ entity.clsid = EntityProperty('Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')
+ return entity
+
+ def _create_default_entity_dict(self, partition, row):
+ '''
+ Creates a dictionary-based entity with fixed values, using all
+ of the supported data types.
+ '''
+ # TODO: Edm.Binary and null
+ return {'PartitionKey':partition,
+ 'RowKey':row,
+ 'age':39,
+ 'sex':'male',
+ 'married':True,
+ 'deceased':False,
+ 'optional':None,
+ 'ratio':3.1,
+ 'large':9333111000,
+ 'Birthday':datetime(1973,10,04),
+ 'birthday':datetime(1970,10,04),
+ 'binary':EntityProperty('Edm.Binary', None),
+ 'other':EntityProperty('Edm.Int64', 20),
+ 'clsid':EntityProperty('Edm.Guid', 'c9da6455-213d-42c9-9a79-3e9149a57833')}
+
+ def _create_updated_entity_dict(self, partition, row):
+ '''
+ Creates a dictionary-based entity with fixed values, with a
+ different set of values than the default entity. It
+ adds fields, changes field values, changes field types,
+ and removes fields when compared to the default entity.
+ '''
+ return {'PartitionKey':partition,
+ 'RowKey':row,
+ 'age':'abc',
+ 'sex':'female',
+ 'sign':'aquarius',
+ 'birthday':datetime(1991,10,04)}
+
+ def _assert_default_entity(self, entity):
+ '''
+ Asserts that the entity passed in matches the default entity.
+ '''
+ self.assertEquals(entity.age, 39)
+ self.assertEquals(entity.sex, 'male')
+ self.assertEquals(entity.married, True)
+ self.assertEquals(entity.deceased, False)
+ self.assertFalse(hasattr(entity, "aquarius"))
+ self.assertEquals(entity.ratio, 3.1)
+ self.assertEquals(entity.large, 9333111000)
+ self.assertEquals(entity.Birthday, datetime(1973,10,04))
+ self.assertEquals(entity.birthday, datetime(1970,10,04))
+ self.assertEquals(entity.other, 20)
+ self.assertIsInstance(entity.clsid, EntityProperty)
+ self.assertEquals(entity.clsid.type, 'Edm.Guid')
+ self.assertEquals(entity.clsid.value, 'c9da6455-213d-42c9-9a79-3e9149a57833')
+
+ def _assert_updated_entity(self, entity):
+ '''
+ Asserts that the entity passed in matches the updated entity.
+ '''
+ self.assertEquals(entity.age, 'abc')
+ self.assertEquals(entity.sex, 'female')
+ self.assertFalse(hasattr(entity, "married"))
+ self.assertFalse(hasattr(entity, "deceased"))
+ self.assertEquals(entity.sign, 'aquarius')
+ self.assertFalse(hasattr(entity, "optional"))
+ self.assertFalse(hasattr(entity, "ratio"))
+ self.assertFalse(hasattr(entity, "large"))
+ self.assertFalse(hasattr(entity, "Birthday"))
+ self.assertEquals(entity.birthday, datetime(1991,10,04))
+ self.assertFalse(hasattr(entity, "other"))
+ self.assertFalse(hasattr(entity, "clsid"))
+
+ def _assert_merged_entity(self, entity):
+ '''
+ Asserts that the entity passed in matches the default entity
+ merged with the updated entity.
+ '''
+ self.assertEquals(entity.age, 'abc')
+ self.assertEquals(entity.sex, 'female')
+ self.assertEquals(entity.sign, 'aquarius')
+ self.assertEquals(entity.married, True)
+ self.assertEquals(entity.deceased, False)
+ self.assertEquals(entity.sign, 'aquarius')
+ self.assertEquals(entity.ratio, 3.1)
+ self.assertEquals(entity.large, 9333111000)
+ self.assertEquals(entity.Birthday, datetime(1973,10,04))
+ self.assertEquals(entity.birthday, datetime(1991,10,04))
+ self.assertEquals(entity.other, 20)
+ self.assertIsInstance(entity.clsid, EntityProperty)
+ self.assertEquals(entity.clsid.type, 'Edm.Guid')
+ self.assertEquals(entity.clsid.value, 'c9da6455-213d-42c9-9a79-3e9149a57833')
+
+ #--Test cases for table service -------------------------------------------
+ def test_get_set_table_service_properties(self):
table_properties = self.tc.get_table_service_properties()
self.tc.set_table_service_properties(table_properties)
@@ -114,10 +218,16 @@ def test_sanity_get_set_table_service_properties(self):
setattr(cur, last_attr, value)
self.tc.set_table_service_properties(table_properties)
- table_properties = self.tc.get_table_service_properties()
- cur = table_properties
- for component in path.split('.'):
- cur = getattr(cur, component)
+ retry_count = 0
+ while retry_count < MAX_RETRY:
+ table_properties = self.tc.get_table_service_properties()
+ cur = table_properties
+ for component in path.split('.'):
+ cur = getattr(cur, component)
+ if value == cur:
+ break
+ time.sleep(1)
+ retry_count += 1
self.assertEquals(value, cur)
@@ -150,260 +260,501 @@ def test_table_service_set_both(self):
self.assertEquals(5, table_properties.logging.retention_policy.days)
+ #--Test cases for tables --------------------------------------------------
+ def test_create_table(self):
+ # Arrange
- #--Helpers-----------------------------------------------------------------
- def sanity_create_table(self):
- resp = self.tc.create_table(TABLE_TO_DELETE)
- self.assertTrue(resp)
- #self.assertEqual(resp.cache_control, u'no-cache')
-
- resp = self.tc.create_table(TABLE_NO_DELETE)
- self.assertTrue(resp)
- #self.assertEqual(resp.cache_control, u'no-cache')
-
- def sanity_query_tables(self):
- resp = self.tc.query_tables()
- self.assertEqual(type(resp), list)
- tableNames = [x.name for x in resp]
- self.assertGreaterEqual(len(tableNames), 2)
- self.assertIn(TABLE_NO_DELETE, tableNames)
- self.assertIn(TABLE_TO_DELETE, tableNames)
-
- def sanity_delete_table(self):
- resp = self.tc.delete_table(TABLE_TO_DELETE)
- self.assertTrue(resp)
-
- def sanity_insert_entity(self):
- resp = self.tc.insert_entity(TABLE_NO_DELETE, {'PartitionKey':'Lastname',
- 'RowKey':'Firstname',
- 'age':39,
- 'sex':'male',
- 'birthday':datetime(1973,10,04)})
- self.assertEquals(resp, None)
+ # Act
+ created = self.tc.create_table(self.table_name)
- entity = Entity()
- entity.PartitionKey = 'Lastname'
- entity.RowKey = 'Firstname1'
- entity.age = 39
- entity.Birthday = EntityProperty('Edm.Int64', 20)
-
- resp = self.tc.insert_entity(TABLE_NO_DELETE, entity)
- self.assertEquals(resp, None)
-
- def sanity_get_entity(self):
- ln = u'Lastname'
- fn1 = u'Firstname1'
- resp = self.tc.get_entity(TABLE_NO_DELETE,
- ln,
- fn1,
- '')
- self.assertEquals(resp.PartitionKey, ln)
- self.assertEquals(resp.RowKey, fn1)
- self.assertEquals(resp.age, 39)
- self.assertEquals(resp.Birthday, 20)
+ # Assert
+ self.assertTrue(created)
- def sanity_query_entities(self):
- resp = self.tc.query_entities(TABLE_NO_DELETE, '', '')
- self.assertEquals(len(resp), 2)
- self.assertEquals(resp[0].birthday, datetime(1973, 10, 04))
- self.assertEquals(resp[1].Birthday, 20)
-
- def sanity_update_entity(self):
- ln = u'Lastname'
- fn = u'Firstname'
- resp = self.tc.update_entity(TABLE_NO_DELETE,
- ln,
- fn,
- {'PartitionKey':'Lastname',
- 'RowKey':'Firstname',
- 'age':21,
- 'sex':'female',
- 'birthday':datetime(1991,10,04)})
- self.assertEquals(resp, None)
-
- resp = self.tc.get_entity(TABLE_NO_DELETE,
- ln,
- fn,
- '')
- self.assertEquals(resp.PartitionKey, ln)
- self.assertEquals(resp.RowKey, fn)
- self.assertEquals(resp.age, 21)
- self.assertEquals(resp.sex, u'female')
- self.assertEquals(resp.birthday, datetime(1991, 10, 04))
-
- def sanity_insert_or_merge_entity(self):
- ln = u'Lastname'
- fn = u'Firstname'
- resp = self.tc.insert_or_merge_entity(TABLE_NO_DELETE,
- ln,
- fn,
- {'PartitionKey':'Lastname',
- 'RowKey':'Firstname',
- 'age': u'abc', #changed type
- 'sex':'male', #changed value
- 'birthday':datetime(1991,10,04),
- 'sign' : 'aquarius' #new
- })
- self.assertEquals(resp, None)
+ def test_create_table_fail_on_exist(self):
+ # Arrange
+
+ # Act
+ created = self.tc.create_table(self.table_name, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_create_table_with_already_existing_table(self):
+ # Arrange
+
+ # Act
+ created1 = self.tc.create_table(self.table_name)
+ created2 = self.tc.create_table(self.table_name)
+
+ # Assert
+ self.assertTrue(created1)
+ self.assertFalse(created2)
+
+ def test_create_table_with_already_existing_table_fail_on_exist(self):
+ # Arrange
+
+ # Act
+ created = self.tc.create_table(self.table_name)
+ with self.assertRaises(WindowsAzureError):
+ self.tc.create_table(self.table_name, True)
+
+ # Assert
+ self.assertTrue(created)
+
+ def test_query_tables(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ tables = self.tc.query_tables()
+ for table in tables:
+ pass
- resp = self.tc.get_entity(TABLE_NO_DELETE,
- ln,
- fn,
- '')
- self.assertEquals(resp.PartitionKey, ln)
- self.assertEquals(resp.RowKey, fn)
- self.assertEquals(resp.age, u'abc')
- self.assertEquals(resp.sex, u'male')
- self.assertEquals(resp.birthday, datetime(1991, 10, 4))
- self.assertEquals(resp.sign, u'aquarius')
-
- def sanity_insert_or_replace_entity(self):
- ln = u'Lastname'
- fn = u'Firstname'
- resp = self.tc.insert_or_replace_entity(TABLE_NO_DELETE,
- ln,
- fn,
- {'PartitionKey':'Lastname',
- 'RowKey':'Firstname',
- 'age':1,
- 'sex':'male'})
- self.assertEquals(resp, None)
-
- resp = self.tc.get_entity(TABLE_NO_DELETE,
- ln,
- fn,
- '')
- self.assertEquals(resp.PartitionKey, ln)
- self.assertEquals(resp.RowKey, fn)
- self.assertEquals(resp.age, 1)
- self.assertEquals(resp.sex, u'male')
- self.assertFalse(hasattr(resp, "birthday"))
- self.assertFalse(hasattr(resp, "sign"))
-
- def sanity_merge_entity(self):
- ln = u'Lastname'
- fn = u'Firstname'
- resp = self.tc.merge_entity(TABLE_NO_DELETE,
- ln,
- fn,
- {'PartitionKey':'Lastname',
- 'RowKey':'Firstname',
- 'sex':'female',
- 'fact': 'nice person'})
- self.assertEquals(resp, None)
-
- resp = self.tc.get_entity(TABLE_NO_DELETE,
- ln,
- fn,
- '')
- self.assertEquals(resp.PartitionKey, ln)
- self.assertEquals(resp.RowKey, fn)
- self.assertEquals(resp.age, 1)
- self.assertEquals(resp.sex, u'female')
- self.assertEquals(resp.fact, u'nice person')
+ # Assert
+ tableNames = [x.name for x in tables]
+ self.assertGreaterEqual(len(tableNames), 1)
+ self.assertGreaterEqual(len(tables), 1)
+ self.assertIn(self.table_name, tableNames)
+
+ def test_query_tables_with_table_name(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ tables = self.tc.query_tables(self.table_name)
+ for table in tables:
+ pass
+
+ # Assert
+ self.assertEqual(len(tables), 1)
+ self.assertEqual(tables[0].name, self.table_name)
+
+ def test_query_tables_with_table_name_no_tables(self):
+ # Arrange
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.tc.query_tables(self.table_name)
+
+ # Assert
+
+ def test_query_tables_with_top(self):
+ # Arrange
+ self.additional_table_names = [self.table_name + suffix for suffix in 'abcd']
+ for name in self.additional_table_names:
+ self.tc.create_table(name)
+
+ # Act
+ tables = self.tc.query_tables(None, 3)
+ for table in tables:
+ pass
+
+ # Assert
+ self.assertEqual(len(tables), 3)
+
+ def test_query_tables_with_top_and_next_table_name(self):
+ # Arrange
+ self.additional_table_names = [self.table_name + suffix for suffix in 'abcd']
+ for name in self.additional_table_names:
+ self.tc.create_table(name)
+
+ # Act
+ tables_set1 = self.tc.query_tables(None, 3)
+ tables_set2 = self.tc.query_tables(None, 3, tables_set1.x_ms_continuation['NextTableName'])
+
+ # Assert
+ self.assertEqual(len(tables_set1), 3)
+ self.assertGreaterEqual(len(tables_set2), 1)
+ self.assertLessEqual(len(tables_set2), 3)
+
+ def test_delete_table_with_existing_table(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ deleted = self.tc.delete_table(self.table_name)
+
+ # Assert
+ self.assertTrue(deleted)
+ tables = self.tc.query_tables()
+ self.assertNamedItemNotInContainer(tables, self.table_name)
+
+ def test_delete_table_with_existing_table_fail_not_exist(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ deleted = self.tc.delete_table(self.table_name, True)
+
+ # Assert
+ self.assertTrue(deleted)
+ tables = self.tc.query_tables()
+ self.assertNamedItemNotInContainer(tables, self.table_name)
+
+ def test_delete_table_with_non_existing_table(self):
+ # Arrange
+
+ # Act
+ deleted = self.tc.delete_table(self.table_name)
+
+ # Assert
+ self.assertFalse(deleted)
+
+ def test_delete_table_with_non_existing_table_fail_not_exist(self):
+ # Arrange
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.tc.delete_table(self.table_name, True)
+
+ # Assert
+
+ #--Test cases for entities ------------------------------------------
+ def test_insert_entity_dictionary(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ dict = self._create_default_entity_dict('MyPartition', '1')
+ resp = self.tc.insert_entity(self.table_name, dict)
+
+ # Assert
+ self.assertIsNotNone(resp)
+
+ def test_insert_entity_class_instance(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ entity = self._create_default_entity_class('MyPartition', '1')
+ resp = self.tc.insert_entity(self.table_name, entity)
+
+ # Assert
+ self.assertIsNotNone(resp)
+
+ def test_insert_entity_conflict(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.tc.insert_entity(self.table_name, self._create_default_entity_dict('MyPartition', '1'))
+
+ # Assert
+
+ def test_get_entity(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ resp = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+
+ # Assert
+ self.assertEquals(resp.PartitionKey, 'MyPartition')
+ self.assertEquals(resp.RowKey, '1')
+ self._assert_default_entity(resp)
+
+ def test_get_entity_not_existing(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.tc.get_entity(self.table_name, 'MyPartition', '1')
+
+ # Assert
+
+ def test_get_entity_with_select(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ resp = self.tc.get_entity(self.table_name, 'MyPartition', '1', 'age,sex')
+
+ # Assert
+ self.assertEquals(resp.age, 39)
+ self.assertEquals(resp.sex, 'male')
self.assertFalse(hasattr(resp, "birthday"))
+ self.assertFalse(hasattr(resp, "married"))
+ self.assertFalse(hasattr(resp, "deceased"))
- def sanity_delete_entity(self):
- ln = u'Lastname'
- fn = u'Firstname'
- resp = self.tc.delete_entity(TABLE_NO_DELETE,
- ln,
- fn)
- self.assertEquals(resp, None)
+ def test_query_entities(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 2)
- self.assertRaises(WindowsAzureError,
- lambda: self.tc.get_entity(TABLE_NO_DELETE, ln, fn, ''))
-
- def test_batch_partition_key(self):
- tn = BATCH_TABLE + 'pk'
- self.tc.create_table(tn)
- try:
- self.tc.begin_batch()
- self.tc.insert_entity(TABLE_NO_DELETE, {'PartitionKey':'Lastname',
- 'RowKey':'Firstname',
- 'age':39,
- 'sex':'male',
- 'birthday':datetime(1973,10,04)})
-
- self.tc.insert_entity(TABLE_NO_DELETE, {'PartitionKey':'Lastname',
- 'RowKey':'Firstname2',
- 'age':39,
- 'sex':'male',
- 'birthday':datetime(1973,10,04)})
-
- self.tc.commit_batch()
- finally:
- self.tc.delete_table(tn)
-
- def test_sanity_batch(self):
- return
- self.tc.create_table(BATCH_TABLE)
-
- #resp = self.tc.begin_batch()
- #self.assertEquals(resp, None)
-
- resp = self.tc.insert_entity(BATCH_TABLE, {'PartitionKey':'Lastname',
- 'RowKey':'Firstname',
- 'age':39,
- 'sex':'male',
- 'birthday':datetime(1973,10,04)})
-
- #resp = self.tc.insert_entity(BATCH_TABLE, {'PartitionKey':'Lastname',
- # 'RowKey':'Firstname2',
- # 'age':35,
- # 'sex':'female',
- # 'birthday':datetime(1977,12,5)})
- #
- resp = self.tc.query_entities(BATCH_TABLE, '', '')
- self.assertEquals(len(resp), 0)
-
- #self.tc.commit_batch()
- return
- resp = self.tc.query_entities(BATCH_TABLE, '', '')
+ # Act
+ resp = self.tc.query_entities(self.table_name)
+
+ # Assert
self.assertEquals(len(resp), 2)
+ for entity in resp:
+ self.assertEquals(entity.PartitionKey, 'MyPartition')
+ self._assert_default_entity(entity)
+ self.assertEquals(resp[0].RowKey, '1')
+ self.assertEquals(resp[1].RowKey, '2')
- self.tc.delete_table(BATCH_TABLE)
-
- def sanity_begin_batch(self):
- resp = self.tc.begin_batch()
- self.assertEquals(resp, None)
+ def test_query_entities_with_filter(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 2)
+ self.tc.insert_entity(self.table_name, self._create_default_entity_dict('MyOtherPartition', '3'))
+
+ # Act
+ resp = self.tc.query_entities(self.table_name, "PartitionKey eq 'MyPartition'")
+
+ # Assert
+ self.assertEquals(len(resp), 2)
+ for entity in resp:
+ self.assertEquals(entity.PartitionKey, 'MyPartition')
+ self._assert_default_entity(entity)
+
+ def test_query_entities_with_select(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 2)
+
+ # Act
+ resp = self.tc.query_entities(self.table_name, None, 'age,sex')
+
+ # Assert
+ self.assertEquals(len(resp), 2)
+ self.assertEquals(resp[0].age, 39)
+ self.assertEquals(resp[0].sex, 'male')
+ self.assertFalse(hasattr(resp[0], "birthday"))
+ self.assertFalse(hasattr(resp[0], "married"))
+ self.assertFalse(hasattr(resp[0], "deceased"))
+
+ def test_query_entities_with_top(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 3)
+
+ # Act
+ resp = self.tc.query_entities(self.table_name, None, None, 2)
+
+ # Assert
+ self.assertEquals(len(resp), 2)
+
+ def test_query_entities_with_top_and_next(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 5)
+
+ # Act
+ resp1 = self.tc.query_entities(self.table_name, None, None, 2)
+ resp2 = self.tc.query_entities(self.table_name, None, None, 2, resp1.x_ms_continuation['NextPartitionKey'], resp1.x_ms_continuation['NextRowKey'])
+ resp3 = self.tc.query_entities(self.table_name, None, None, 2, resp2.x_ms_continuation['NextPartitionKey'], resp2.x_ms_continuation['NextRowKey'])
+
+ # Assert
+ self.assertEquals(len(resp1), 2)
+ self.assertEquals(len(resp2), 2)
+ self.assertEquals(len(resp3), 1)
+ self.assertEquals(resp1[0].RowKey, '1')
+ self.assertEquals(resp1[1].RowKey, '2')
+ self.assertEquals(resp2[0].RowKey, '3')
+ self.assertEquals(resp2[1].RowKey, '4')
+ self.assertEquals(resp3[0].RowKey, '5')
+
+ def test_update_entity(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.update_entity(self.table_name, 'MyPartition', '1', sent_entity)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_updated_entity(received_entity)
+
+ def test_update_entity_with_if_matches(self):
+ # Arrange
+ entities = self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.update_entity(self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_updated_entity(received_entity)
+
+ def test_update_entity_with_if_doesnt_match(self):
+ # Arrange
+ entities = self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ with self.assertRaises(WindowsAzureError):
+ self.tc.update_entity(self.table_name, 'MyPartition', '1', sent_entity, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
+
+ # Assert
+
+ def test_insert_or_merge_entity_with_existing_entity(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.insert_or_merge_entity(self.table_name, 'MyPartition', '1', sent_entity)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_merged_entity(received_entity)
+
+ def test_insert_or_merge_entity_with_non_existing_entity(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.insert_or_merge_entity(self.table_name, 'MyPartition', '1', sent_entity)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_updated_entity(received_entity)
+
+ def test_insert_or_replace_entity_with_existing_entity(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.insert_or_replace_entity(self.table_name, 'MyPartition', '1', sent_entity)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_updated_entity(received_entity)
+
+ def test_insert_or_replace_entity_with_non_existing_entity(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.insert_or_replace_entity(self.table_name, 'MyPartition', '1', sent_entity)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_updated_entity(received_entity)
+
+ def test_merge_entity(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.merge_entity(self.table_name, 'MyPartition', '1', sent_entity)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_merged_entity(received_entity)
+
+ def test_merge_entity_not_existing(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ with self.assertRaises(WindowsAzureError):
+ self.tc.merge_entity(self.table_name, 'MyPartition', '1', sent_entity)
+
+ # Assert
+
+ def test_merge_entity_with_if_matches(self):
+ # Arrange
+ entities = self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ resp = self.tc.merge_entity(self.table_name, 'MyPartition', '1', sent_entity, if_match=entities[0].etag)
+
+ # Assert
+ self.assertIsNotNone(resp)
+ received_entity = self.tc.get_entity(self.table_name, 'MyPartition', '1')
+ self._assert_merged_entity(received_entity)
+
+ def test_merge_entity_with_if_doesnt_match(self):
+ # Arrange
+ entities = self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ sent_entity = self._create_updated_entity_dict('MyPartition','1')
+ with self.assertRaises(WindowsAzureError):
+ self.tc.merge_entity(self.table_name, 'MyPartition', '1', sent_entity, if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
+
+ # Assert
+
+ def test_delete_entity(self):
+ # Arrange
+ self._create_table_with_default_entities(self.table_name, 1)
+
+ # Act
+ resp = self.tc.delete_entity(self.table_name, 'MyPartition', '1')
+
+ # Assert
+ self.assertIsNone(resp)
+ with self.assertRaises(WindowsAzureError):
+ self.tc.get_entity(self.table_name, 'MyPartition', '1')
+
+ def test_delete_entity_not_existing(self):
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.tc.delete_entity(self.table_name, 'MyPartition', '1')
+
+ # Assert
+
+ def test_delete_entity_with_if_matches(self):
+ # Arrange
+ entities = self._create_table_with_default_entities(self.table_name, 1)
- def sanity_commit_batch(self):
- resp = self.tc.commit_batch()
- self.assertEquals(resp, None)
+ # Act
+ resp = self.tc.delete_entity(self.table_name, 'MyPartition', '1', if_match=entities[0].etag)
- def sanity_cancel_batch(self):
- resp = self.tc.cancel_batch()
- self.assertEquals(resp, None)
+ # Assert
+ self.assertIsNone(resp)
+ with self.assertRaises(WindowsAzureError):
+ self.tc.get_entity(self.table_name, 'MyPartition', '1')
- def test_query_tables_top(self):
- table_id = getUniqueTestRunID()
- for i in xrange(20):
- self.tc.create_table(table_id + str(i))
+ def test_delete_entity_with_if_doesnt_match(self):
+ # Arrange
+ entities = self._create_table_with_default_entities(self.table_name, 1)
- res = self.tc.query_tables(top = 5)
- self.assertEqual(len(res), 5)
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.tc.delete_entity(self.table_name, 'MyPartition', '1', if_match=u'W/"datetime\'2012-06-15T22%3A51%3A44.9662825Z\'"')
- def test_with_filter(self):
- # Single filter
+ # Assert
+
+    #--Test cases for filters and batch ---------------------------------
+ def test_with_filter_single(self):
called = []
+
def my_filter(request, next):
called.append(True)
return next(request)
tc = self.tc.with_filter(my_filter)
- tc.create_table(FILTER_TABLE)
+ tc.create_table(self.table_name)
self.assertTrue(called)
del called[:]
- tc.delete_table(FILTER_TABLE)
+ tc.delete_table(self.table_name)
self.assertTrue(called)
del called[:]
- # Chained filters
+ def test_with_filter_chained(self):
+ called = []
+
def filter_a(request, next):
called.append('a')
return next(request)
@@ -413,14 +764,17 @@ def filter_b(request, next):
return next(request)
tc = self.tc.with_filter(filter_a).with_filter(filter_b)
- tc.create_table(FILTER_TABLE + '0')
+ tc.create_table(self.table_name)
self.assertEqual(called, ['b', 'a'])
- tc.delete_table(FILTER_TABLE + '0')
+ tc.delete_table(self.table_name)
def test_batch_insert(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert'
@@ -431,15 +785,18 @@ def test_batch_insert(self):
entity.test5 = datetime.utcnow()
self.tc.begin_batch()
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
self.tc.commit_batch()
- #Assert
- result = self.tc.get_entity(self.test_table, '001', 'batch_insert')
+ # Assert
+ result = self.tc.get_entity(self.table_name, '001', 'batch_insert')
self.assertIsNotNone(result)
def test_batch_update(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_update'
@@ -448,21 +805,24 @@ def test_batch_update(self):
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
- entity = self.tc.get_entity(self.test_table, '001', 'batch_update')
+ entity = self.tc.get_entity(self.table_name, '001', 'batch_update')
self.assertEqual(3, entity.test3)
entity.test2 = 'value1'
self.tc.begin_batch()
- self.tc.update_entity(self.test_table, '001', 'batch_update', entity)
+ self.tc.update_entity(self.table_name, '001', 'batch_update', entity)
self.tc.commit_batch()
- entity = self.tc.get_entity(self.test_table, '001', 'batch_update')
+ entity = self.tc.get_entity(self.table_name, '001', 'batch_update')
- #Assert
+ # Assert
self.assertEqual('value1', entity.test2)
def test_batch_merge(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
@@ -471,25 +831,28 @@ def test_batch_merge(self):
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
- entity = self.tc.get_entity(self.test_table, '001', 'batch_merge')
+ entity = self.tc.get_entity(self.table_name, '001', 'batch_merge')
self.assertEqual(3, entity.test3)
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_merge'
entity.test2 = 'value1'
self.tc.begin_batch()
- self.tc.merge_entity(self.test_table, '001', 'batch_merge', entity)
+ self.tc.merge_entity(self.table_name, '001', 'batch_merge', entity)
self.tc.commit_batch()
- entity = self.tc.get_entity(self.test_table, '001', 'batch_merge')
+ entity = self.tc.get_entity(self.table_name, '001', 'batch_merge')
- #Assert
+ # Assert
self.assertEqual('value1', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_insert_replace(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_replace'
@@ -499,18 +862,21 @@ def test_batch_insert_replace(self):
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.begin_batch()
- self.tc.insert_or_replace_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
+ self.tc.insert_or_replace_entity(self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.tc.commit_batch()
- entity = self.tc.get_entity(self.test_table, '001', 'batch_insert_replace')
+ entity = self.tc.get_entity(self.table_name, '001', 'batch_insert_replace')
- #Assert
+ # Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_insert_merge(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_insert_merge'
@@ -520,18 +886,21 @@ def test_batch_insert_merge(self):
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
self.tc.begin_batch()
- self.tc.insert_or_merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
+ self.tc.insert_or_merge_entity(self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.tc.commit_batch()
- entity = self.tc.get_entity(self.test_table, '001', 'batch_insert_merge')
+ entity = self.tc.get_entity(self.table_name, '001', 'batch_insert_merge')
- #Assert
+ # Assert
self.assertIsNotNone(entity)
self.assertEqual('value', entity.test2)
self.assertEqual(1234567890, entity.test4)
def test_batch_delete(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = '001'
entity.RowKey = 'batch_delete'
@@ -540,16 +909,19 @@ def test_batch_delete(self):
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
- entity = self.tc.get_entity(self.test_table, '001', 'batch_delete')
+ entity = self.tc.get_entity(self.table_name, '001', 'batch_delete')
#self.assertEqual(3, entity.test3)
self.tc.begin_batch()
- self.tc.delete_entity(self.test_table, '001', 'batch_delete')
+ self.tc.delete_entity(self.table_name, '001', 'batch_delete')
self.tc.commit_batch()
def test_batch_inserts(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = 'batch_inserts'
entity.test = EntityProperty('Edm.Boolean', 'true')
@@ -560,17 +932,20 @@ def test_batch_inserts(self):
self.tc.begin_batch()
for i in range(100):
entity.RowKey = str(i)
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
self.tc.commit_batch()
- entities = self.tc.query_entities(self.test_table, "PartitionKey eq 'batch_inserts'", '')
+ entities = self.tc.query_entities(self.table_name, "PartitionKey eq 'batch_inserts'", '')
- #Assert
+ # Assert
self.assertIsNotNone(entities);
self.assertEqual(100, len(entities))
def test_batch_all_operations_together(self):
- #Act
+ # Arrange
+ self._create_table(self.table_name)
+
+ # Act
entity = Entity()
entity.PartitionKey = '003'
entity.RowKey = 'batch_all_operations_together-1'
@@ -579,80 +954,107 @@ def test_batch_all_operations_together(self):
entity.test3 = 3
entity.test4 = EntityProperty('Edm.Int64', '1234567890')
entity.test5 = datetime.utcnow()
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-2'
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-3'
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-4'
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
self.tc.begin_batch()
entity.RowKey = 'batch_all_operations_together'
- self.tc.insert_entity(self.test_table, entity)
+ self.tc.insert_entity(self.table_name, entity)
entity.RowKey = 'batch_all_operations_together-1'
- self.tc.delete_entity(self.test_table, entity.PartitionKey, entity.RowKey)
+ self.tc.delete_entity(self.table_name, entity.PartitionKey, entity.RowKey)
entity.RowKey = 'batch_all_operations_together-2'
entity.test3 = 10
- self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
+ self.tc.update_entity(self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-3'
entity.test3 = 100
- self.tc.merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
+ self.tc.merge_entity(self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-4'
entity.test3 = 10
- self.tc.insert_or_replace_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
+ self.tc.insert_or_replace_entity(self.table_name, entity.PartitionKey, entity.RowKey, entity)
entity.RowKey = 'batch_all_operations_together-5'
- self.tc.insert_or_merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
+ self.tc.insert_or_merge_entity(self.table_name, entity.PartitionKey, entity.RowKey, entity)
self.tc.commit_batch()
- #Assert
- entities = self.tc.query_entities(self.test_table, "PartitionKey eq '003'", '')
+ # Assert
+ entities = self.tc.query_entities(self.table_name, "PartitionKey eq '003'", '')
self.assertEqual(5, len(entities))
- def test_batch_negative(self):
- #Act
- entity = Entity()
- entity.PartitionKey = '001'
- entity.RowKey = 'batch_negative_1'
- entity.test = 1
-
- self.tc.insert_entity(self.test_table, entity)
- entity.test = 2
- entity.RowKey = 'batch_negative_2'
- self.tc.insert_entity(self.test_table, entity)
- entity.test = 3
- entity.RowKey = 'batch_negative_3'
- self.tc.insert_entity(self.test_table, entity)
- entity.test = -2
- self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
-
- try:
+ def test_batch_same_row_operations_fail(self):
+ # Arrange
+ self._create_table(self.table_name)
+ entity = self._create_default_entity_dict('001', 'batch_negative_1')
+ self.tc.insert_entity(self.table_name, entity)
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
self.tc.begin_batch()
- entity.RowKey = 'batch_negative_1'
- self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
- self.tc.merge_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
- self.fail('Should raise WindowsAzueError exception')
- self.tc.commit_batch()
- except:
- self.tc.cancel_batch()
- pass
-
- try:
+ entity = self._create_updated_entity_dict('001', 'batch_negative_1')
+ self.tc.update_entity(self.table_name, entity['PartitionKey'], entity['RowKey'], entity)
+
+ entity = self._create_default_entity_dict('001', 'batch_negative_1')
+ self.tc.merge_entity(self.table_name, entity['PartitionKey'], entity['RowKey'], entity)
+
+ self.tc.cancel_batch()
+
+ # Assert
+
+ def test_batch_different_partition_operations_fail(self):
+ # Arrange
+ self._create_table(self.table_name)
+ entity = self._create_default_entity_dict('001', 'batch_negative_1')
+ self.tc.insert_entity(self.table_name, entity)
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
self.tc.begin_batch()
- entity.PartitionKey = '001'
- entity.RowKey = 'batch_negative_1'
- self.tc.update_entity(self.test_table, entity.PartitionKey, entity.RowKey, entity)
- entity.PartitionKey = '002'
- entity.RowKey = 'batch_negative_1'
- self.tc.insert_entity(self.test_table, entity)
- self.fail('Should raise WindowsAzueError exception')
- self.tc.commit_batch()
- except:
- self.tc.cancel_batch()
- pass
-
+ entity = self._create_updated_entity_dict('001', 'batch_negative_1')
+ self.tc.update_entity(self.table_name, entity['PartitionKey'], entity['RowKey'], entity)
+
+ entity = self._create_default_entity_dict('002', 'batch_negative_1')
+ self.tc.insert_entity(self.table_name, entity)
+
+ self.tc.cancel_batch()
+
+ # Assert
+
+ def test_batch_different_table_operations_fail(self):
+ # Arrange
+ other_table_name = self.table_name + 'other'
+ self.additional_table_names = [other_table_name]
+ self._create_table(self.table_name)
+ self._create_table(other_table_name)
+
+ # Act
+ with self.assertRaises(WindowsAzureError):
+ self.tc.begin_batch()
+
+ entity = self._create_default_entity_dict('001', 'batch_negative_1')
+ self.tc.insert_entity(self.table_name, entity)
+
+ entity = self._create_default_entity_dict('001', 'batch_negative_2')
+ self.tc.insert_entity(other_table_name, entity)
+
+ self.tc.cancel_batch()
+
+ def test_unicode_xml_encoding(self):
+        '''Regression test for GitHub issue #57.'''
+ # Act
+ self._create_table(self.table_name)
+ self.tc.insert_entity(self.table_name, {'PartitionKey': 'test', 'RowKey': 'test1', 'Description': u'ꀕ'})
+ self.tc.insert_entity(self.table_name, {'PartitionKey': 'test', 'RowKey': 'test2', 'Description': 'ꀕ'})
+ resp = self.tc.query_entities(self.table_name, "PartitionKey eq 'test'")
+ # Assert
+ self.assertEquals(len(resp), 2)
+ self.assertEquals(resp[0].Description, u'ꀕ')
+ self.assertEquals(resp[1].Description, u'ꀕ')
+
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
diff --git a/test/azuretest/util.py b/test/azuretest/util.py
index 5a803bd297cc..bd8d52baa001 100644
--- a/test/azuretest/util.py
+++ b/test/azuretest/util.py
@@ -1,19 +1,22 @@
-#------------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation.
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft. All rights reserved.
#
-# This source code is subject to terms and conditions of the Apache License,
-# Version 2.0. A copy of the license can be found in the License.html file at
-# the root of this distribution. If you cannot locate the Apache License,
-# Version 2.0, please send an email to vspython@microsoft.com. By using this
-# source code in any fashion, you are agreeing to be bound by the terms of the
-# Apache License, Version 2.0.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# You must not remove this notice, or any other, from this software.
-#------------------------------------------------------------------------------
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#--------------------------------------------------------------------------
import json
import os
import time
+import unittest
from exceptions import EnvironmentError
STATUS_OK = 200
@@ -48,6 +51,12 @@ def __init__(self):
with open(tmpName, "r") as f:
self.ns = json.load(f)
+ def getManagementCertFile(self):
+ return self.ns[u'managementcertfile']
+
+ def getSubscriptionId(self):
+ return self.ns[u'subscriptionid']
+
def getServiceBusKey(self):
return self.ns[u'servicebuskey']
@@ -60,8 +69,14 @@ def getStorageServicesKey(self):
def getStorageServicesName(self):
return self.ns[u'storageservicesname']
- def getHostServiceID(self):
- return self.ns[u'hostserviceid']
+ def getLinuxOSVHD(self):
+ return self.ns[u'linuxosvhd']
+
+ def getProxyHost(self):
+ return self.ns[u'proxyhost']
+
+ def getProxyPort(self):
+ return self.ns[u'proxyport']
credentials = Credentials()
@@ -83,7 +98,8 @@ def getUniqueTestRunID():
for bad in ["-", "_", " ", "."]:
ret_val = ret_val.replace(bad, "")
ret_val = ret_val.lower().strip()
- return ret_val
+    # Only return the first 20 characters so the length of queue and table names will be less than 64. It may not be unique, but that doesn't really matter for the tests.
+ return ret_val[:20]
def getUniqueNameBasedOnCurrentTime(base_name):
'''
@@ -96,3 +112,18 @@ def getUniqueNameBasedOnCurrentTime(base_name):
cur_time = cur_time.replace(bad, "")
cur_time = cur_time.lower().strip()
return base_name + cur_time
+
+class AzureTestCase(unittest.TestCase):
+ def assertNamedItemInContainer(self, container, item_name, msg=None):
+ for item in container:
+ if item.name == item_name:
+ return
+
+ standardMsg = '%s not found in %s' % (repr(item_name), repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertNamedItemNotInContainer(self, container, item_name, msg=None):
+ for item in container:
+ if item.name == item_name:
+ standardMsg = '%s unexpectedly found in %s' % (repr(item_name), repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
diff --git a/test/run.bat b/test/run.bat
index 1586880606e5..b610556263da 100644
--- a/test/run.bat
+++ b/test/run.bat
@@ -1,16 +1,18 @@
@echo OFF
SETLOCAL
REM----------------------------------------------------------------------------
-REM Copyright (c) Microsoft Corporation.
+REM Copyright (c) Microsoft. All rights reserved.
REM
-REM This source code is subject to terms and conditions of the Apache License,
-REM Version 2.0. A copy of the license can be found in the License.html file at
-REM the root of this distribution. If you cannot locate the Apache License,
-REM Version 2.0, please send an email to vspython@microsoft.com. By using this
-REM source code in any fashion, you are agreeing to be bound by the terms of the
-REM Apache License, Version 2.0.
+REM Licensed under the Apache License, Version 2.0 (the "License");
+REM you may not use this file except in compliance with the License.
+REM You may obtain a copy of the License at
+REM http://www.apache.org/licenses/LICENSE-2.0
REM
-REM You must not remove this notice, or any other, from this software.
+REM Unless required by applicable law or agreed to in writing, software
+REM distributed under the License is distributed on an "AS IS" BASIS,
+REM WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+REM See the License for the specific language governing permissions and
+REM limitations under the License.
REM----------------------------------------------------------------------------
cls