From 06289da022e264c3135413cdb064a414a35e1c8c Mon Sep 17 00:00:00 2001
From: Priti Sarap
Date: Wed, 17 Feb 2016 10:32:01 +0530
Subject: [PATCH 001/687] CLOUDSTACK-8717: Failed to start instance after
 restoring the running instance

---
 .../testpaths/testpath_restore_vm.py | 211 ++++++++++++++++++
 1 file changed, 211 insertions(+)
 create mode 100644 test/integration/testpaths/testpath_restore_vm.py

diff --git a/test/integration/testpaths/testpath_restore_vm.py b/test/integration/testpaths/testpath_restore_vm.py
new file mode 100644
index 000000000000..d73499ad21a3
--- /dev/null
+++ b/test/integration/testpaths/testpath_restore_vm.py
@@ -0,0 +1,211 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Test restore of a running VM on VMware with one cluster having 2 Primary Storages
+"""
+
+
+from nose.plugins.attrib import attr
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.lib.utils import cleanup_resources, validateList
+from marvin.lib.base import (Account,
+                             ServiceOffering,
+                             VirtualMachine,
+                             StoragePool
+                             )
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_template,
+                               list_volumes,
+                               list_virtual_machines
+                               )
+
+from marvin.codes import CLUSTERTAG1, ROOT, PASS
+import time
+
+
+class TestRestoreVM(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        testClient = super(TestRestoreVM, cls).getClsTestClient()
+        cls.apiclient = testClient.getApiClient()
+        cls.testdata = testClient.getParsedTestDataConfig()
+        cls.hypervisor = cls.testClient.getHypervisorInfo()
+
+        # Get Zone, Domain and templates
+        cls.domain = get_domain(cls.apiclient)
+        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
+
+        cls.template = get_template(
+            cls.apiclient,
+            cls.zone.id,
+            cls.testdata["ostype"])
+
+        cls._cleanup = []
+
+        try:
+            cls.skiptest = False
+            if cls.hypervisor.lower() not in ["vmware"]:
+                cls.skiptest = True
+                return
+
+            # Create an account
+            cls.account = Account.create(
+                cls.apiclient,
+                cls.testdata["account"],
+                domainid=cls.domain.id
+            )
+            cls._cleanup.append(cls.account)
+            # Create user api client of the account
+            cls.userapiclient = testClient.getUserApiClient(
+                UserName=cls.account.name,
+                DomainName=cls.account.domain
+            )
+            # Create Service offering
+            cls.service_offering_cwps = ServiceOffering.create(
+                cls.apiclient,
+                cls.testdata["service_offering"],
+                tags=CLUSTERTAG1
+            )
+            cls._cleanup.append(cls.service_offering_cwps)
+        except Exception as e:
+            cls.tearDownClass()
+            raise e
+        return
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            cleanup_resources(cls.apiclient, cls._cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+
+    def setUp(self):
+
+        self.cleanup = []
+        if self.skiptest:
+            self.skipTest("This test is to be checked on VMware only. \
+                    Hence, skip for %s" % self.hypervisor)
+
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+
+    def tearDown(self):
+        try:
+            if self.pools:
+                StoragePool.update(
+                    self.apiclient,
+                    id=self.pools[0].id,
+                    tags="")
+
+            cleanup_resources(self.apiclient, self.cleanup)
+        except Exception as e:
+            raise Exception("Warning: Exception during cleanup : %s" % e)
+        return
+
+    @attr(tags=["advanced", "basic"], required_hardware="true")
+    def test_01_recover_VM(self):
+        """ Test Restore VM on VMware
+            1. Deploy a VM without datadisk
+            2. Restore the VM
+            3. Verify that VM comes up in Running state
+        """
+        try:
+            self.pools = StoragePool.list(
+                self.apiclient,
+                zoneid=self.zone.id,
+                scope="CLUSTER")
+
+            status = validateList(self.pools)
+
+            # Step 3
+            self.assertEqual(
+                status[0],
+                PASS,
+                "Check: Failed to list cluster wide storage pools")
+
+            if len(self.pools) < 2:
+                self.skipTest("There must be at least two cluster wide \
+                storage pools available in the setup")
+
+        except Exception as e:
+            self.skipTest(e)
+
+        # Adding tags to Storage Pools
+        cluster_no = 1
+        StoragePool.update(
+            self.apiclient,
+            id=self.pools[0].id,
+            tags=[CLUSTERTAG1[:-1] + repr(cluster_no)])
+
+        self.vm = VirtualMachine.create(
+            self.apiclient,
+            self.testdata["small"],
+            accountid=self.account.name,
+            templateid=self.template.id,
+            domainid=self.account.domainid,
+            serviceofferingid=self.service_offering_cwps.id,
+            zoneid=self.zone.id,
+        )
+        # Step 2
+
+        volumes_root_list = list_volumes(
+            self.apiclient,
+            virtualmachineid=self.vm.id,
+            type=ROOT,
+            listall=True
+        )
+
+        root_volume = volumes_root_list[0]
+
+        # Restore VM until its ROOT disk is recreated on another Primary Storage
+        while True:
+            self.vm.restore(self.apiclient)
+            volumes_root_list = list_volumes(
+                self.apiclient,
+                virtualmachineid=self.vm.id,
+                type=ROOT,
+                listall=True
+            )
+
+            root_volume = volumes_root_list[0]
+
+            if root_volume.storage != self.pools[0].name:
+                break
+
+        # Step 3
+        vm_list = list_virtual_machines(
+            self.apiclient,
+            id=self.vm.id)
+
+        state = vm_list[0].state
+        i = 0
+        while state != "Running":
+            vm_list = list_virtual_machines(
+                self.apiclient,
+                id=self.vm.id)
+
+            time.sleep(10)
+            i = i + 1
+            state = vm_list[0].state
+            if i >= 10:
+                self.fail("Restore VM Failed")
+                break
+
+        return
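[Editor's note - not part of the patch] The tail of test_01_recover_VM polls
list_virtual_machines until the VM reaches "Running", giving up after ten
ten-second attempts. The same retry/timeout contract, written as a standalone
helper (hypothetical, not a Marvin API), makes the intent explicit:

    import time

    def wait_for_state(fetch_state, desired="Running", retries=10, interval=10):
        """Poll fetch_state() until it returns `desired`; False on timeout."""
        for _ in range(retries):
            if fetch_state() == desired:
                return True
            time.sleep(interval)
        return False

With such a helper the loop above would reduce to calling
wait_for_state(lambda: list_virtual_machines(self.apiclient, id=self.vm.id)[0].state)
and invoking self.fail("Restore VM Failed") when it returns False.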
From 5d4b5fd642d250f45ec62ea7f28577d885e3665a Mon Sep 17 00:00:00 2001
From: Aaron Brady
Date: Wed, 23 Mar 2016 12:15:24 +0000
Subject: [PATCH 002/687] Use timeout when applying config to virtual router

---
 .../agent/resource/virtualnetwork/VirtualRoutingResource.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java
index f3edc6967594..f271e72104fb 100644
--- a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java
+++ b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java
@@ -379,7 +379,7 @@ private Answer execute(AggregationControlCommand cmd) {
             timeout = 120;
         }
 
-        ExecutionResult result = applyConfigToVR(cmd.getRouterAccessIp(), fileConfigItem);
+        ExecutionResult result = applyConfigToVR(cmd.getRouterAccessIp(), fileConfigItem, timeout);
         if (!result.isSuccess()) {
             return new Answer(cmd, false, result.getDetails());
         }

From ddfc368117a3548cdf198d4f19f484365cf99aa8 Mon Sep 17 00:00:00 2001
From: Aaron Brady
Date: Sat, 26 Mar 2016 00:38:57 +0000
Subject: [PATCH 003/687] Remove dangerous prototype of applyConfigToVR

---
 .../virtualnetwork/VirtualRoutingResource.java | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java
index f271e72104fb..2d22b148471b 100644
--- a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java
+++ b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java
@@ -152,11 +152,10 @@ private Answer executeQueryCommand(NetworkElementCommand cmd) {
         }
     }
 
-    private ExecutionResult applyConfigToVR(String routerAccessIp, ConfigItem c) {
-        return applyConfigToVR(routerAccessIp, c, VRScripts.DEFAULT_EXECUTEINVR_TIMEOUT);
-    }
-
     private ExecutionResult applyConfigToVR(String routerAccessIp, ConfigItem c, int timeout) {
+        if (timeout < VRScripts.DEFAULT_EXECUTEINVR_TIMEOUT) {
+            timeout = VRScripts.DEFAULT_EXECUTEINVR_TIMEOUT;
+        }
         if (c instanceof FileConfigItem) {
             FileConfigItem configItem = (FileConfigItem)c;
             return _vrDeployer.createFileInVR(routerAccessIp, configItem.getFilePath(), configItem.getFileName(), configItem.getFileContents());
@@ -180,7 +179,7 @@ private Answer applyConfig(NetworkElementCommand cmd, List<ConfigItem> cfg) {
         boolean finalResult = false;
         for (ConfigItem configItem : cfg) {
             long startTimestamp = System.currentTimeMillis();
-            ExecutionResult result = applyConfigToVR(cmd.getRouterAccessIp(), configItem);
+            ExecutionResult result = applyConfigToVR(cmd.getRouterAccessIp(), configItem, VRScripts.DEFAULT_EXECUTEINVR_TIMEOUT);
             if (s_logger.isDebugEnabled()) {
                 long elapsed = System.currentTimeMillis() - startTimestamp;
                 s_logger.debug("Processing " + configItem + " took " + elapsed + "ms");
@@ -375,9 +374,6 @@ private Answer execute(AggregationControlCommand cmd) {
         ScriptConfigItem scriptConfigItem = new ScriptConfigItem(VRScripts.VR_CFG, "-c " + VRScripts.CONFIG_CACHE_LOCATION + cfgFileName);
         // 120s is the minimal timeout
         int timeout = answerCounts * _eachTimeout;
-        if (timeout < 120) {
-            timeout = 120;
-        }
 
         ExecutionResult result = applyConfigToVR(cmd.getRouterAccessIp(), fileConfigItem, timeout);
         if (!result.isSuccess()) {
             return new Answer(cmd, false, result.getDetails());
         }
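[Editor's note - not part of the patch] After patches 002 and 003 every caller
of applyConfigToVR passes an explicit timeout, and the method itself clamps the
value to the minimum instead of trusting each call site. The resulting
invariant, sketched in Python for clarity (assuming the 120-second floor
implied by the "// 120s is the minimal timeout" comment above):

    DEFAULT_EXECUTEINVR_TIMEOUT = 120  # seconds; assumed value of the VRScripts constant

    def effective_timeout(requested):
        """A requested timeout can raise the budget but never lower the floor."""
        return max(requested, DEFAULT_EXECUTEINVR_TIMEOUT)

    assert effective_timeout(30) == 120    # small aggregates still get the floor
    assert effective_timeout(600) == 600   # large aggregates keep their budget

Centralizing the clamp removes the two-argument overload that silently applied
the default, which is the "dangerous prototype" the commit subject refers to.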
From 8daaa30fc895b2e54bf4adaea01429008c5b368a Mon Sep 17 00:00:00 2001
From: sanjeevn
Date: Mon, 21 Mar 2016 18:22:08 +0530
Subject: [PATCH 004/687] [CLOUDSTACK-9337] Enhance vcenter.py to create data
 centers in a vCenter server automatically (programmatically)

---
 tools/marvin/marvin/lib/vcenter.py | 169 ++++++++++++++++++++++++++++-
 1 file changed, 164 insertions(+), 5 deletions(-)

diff --git a/tools/marvin/marvin/lib/vcenter.py b/tools/marvin/marvin/lib/vcenter.py
index f7e09873d5ec..d14f364bd1cc 100644
--- a/tools/marvin/marvin/lib/vcenter.py
+++ b/tools/marvin/marvin/lib/vcenter.py
@@ -19,9 +19,12 @@
 from pyVim import connect
 import atexit
 import ssl
-if hasattr(ssl, '_create_unverified_context'):
-    ssl._create_default_https_context = ssl._create_unverified_context
+import subprocess
+import time
+import json
 
+if hasattr(ssl, '_create_unverified_context'):
+    ssl._create_default_https_context = ssl._create_unverified_context()
 
 class Vcenter():
 
     def __init__(self, host, user, pwd):
         """
         create a service_instance object
         """
-        self.service_instance = connect.SmartConnect(host=host,
-                                                     user=user,
-                                                     pwd=pwd)
+        if hasattr(ssl, '_create_default_https_context'):
+            self.service_instance = connect.SmartConnect(host=host,
+                                                         user=user,
+                                                         pwd=pwd,
+                                                         sslContext=ssl._create_default_https_context)
+        else:
+            self.service_instance = connect.SmartConnect(host=host,
+                                                         user=user,
+                                                         pwd=pwd)
         atexit.register(connect.Disconnect, self.service_instance)
 
     @staticmethod
     def get_clusters(self, dc, clus=None):
         """
         """
         pass
 
+    def create_datacenter(self, dcname=None, service_instance=None, folder=None):
+        """
+        Creates a new datacenter with the given name.
+        Any % (percent) character used in this name parameter must be escaped,
+        unless it is used to start an escape sequence. Clients may also escape
+        any other characters in this name parameter.
+
+        An entity name must be a non-empty string of
+        less than 80 characters. The slash (/), backslash (\) and percent (%)
+        will be escaped using the URL syntax. For example, %2F
+
+        This can raise the following exceptions:
+        vim.fault.DuplicateName
+        vim.fault.InvalidName
+        vmodl.fault.NotSupported
+        vmodl.fault.RuntimeFault
+        ValueError raised if the name len is > 79
+        https://github.com/vmware/pyvmomi/blob/master/docs/vim/Folder.rst
+
+        Required Privileges
+        Datacenter.Create
+
+        :param folder: Folder object to create DC in. If None it will default to
+                       rootFolder
+        :param dcname: Name for the new datacenter.
+        :param service_instance: ServiceInstance connection to a given vCenter
+        :return:
+        """
+        if len(dcname) > 79:
+            raise ValueError("The name of the datacenter must be under "
+                             "80 characters.")
+
+        if folder is None:
+            folder = self.service_instance.content.rootFolder
+
+        if folder is not None and isinstance(folder, vim.Folder):
+            dc_moref = folder.CreateDatacenter(name=dcname)
+            return dc_moref
+
+    def create_cluster(self, cluster_name, datacenter):
+        """
+        Method to create a Cluster in vCenter
+
+        :param cluster_name: Name of the cluster
+        :param datacenter: Name of the data center
+        :return: Cluster MORef
+        """
+        # cluster_name = kwargs.get("name")
+        # cluster_spec = kwargs.get("cluster_spec")
+        # datacenter = kwargs.get("datacenter")
+
+        if cluster_name is None:
+            raise ValueError("Missing value for name.")
+        if datacenter is None:
+            raise ValueError("Missing value for datacenter.")
+
+        cluster_spec = vim.cluster.ConfigSpecEx()
+
+        host_folder = datacenter.hostFolder
+        cluster = host_folder.CreateClusterEx(name=cluster_name, spec=cluster_spec)
+        return cluster
+
+    def add_host(self, cluster, hostname, sslthumbprint, username, password):
+        """
+        Method to add a host to a vCenter cluster
+
+        :param cluster: Cluster MORef to add the host to
+        :param hostname: Name or IP address of the host
+        :param sslthumbprint: SHA1 thumbprint of the host's SSL certificate
+        :param username: User name on the host
+        :param password: Password on the host
+        """
+        if hostname is None:
+            raise ValueError("Missing value for name.")
+        try:
+            hostspec = vim.host.ConnectSpec(hostName=hostname,
+                                            userName=username,
+                                            sslThumbprint=sslthumbprint,
+                                            password=password,
+                                            force=True)
+            task = cluster.AddHost(spec=hostspec, asConnected=True)
+        except Exception as e:
+            print "Error adding host: %s" % e
+        self.wait_for_task(task)
+        host = self._get_obj([vim.HostSystem], hostname)
+        return host
+
+    def create_datacenters(self, config):
+        """
+        Method to create data centers in a vCenter server programmatically.
+        It expects configuration data in the form of a dictionary. The
+        configuration file is the same as the one we pass to deployDataCenter.py
+        for creating a datacenter in CS.
+
+        :param config:
+        :return:
+        """
+        zones = config['zones']
+        try:
+            for zone in zones:
+                dc_obj = self.create_datacenter(zone['name'])
+                for pod in zone['pods']:
+                    for cluster in pod['clusters']:
+                        clustername = cluster['clustername'].split('/')[-1]
+                        cluster_obj = self.create_cluster(
+                            cluster_name=clustername,
+                            datacenter=dc_obj
+                        )
+                        for host in cluster['hosts']:
+                            host_ip = host['url'].split("//")[-1]
+                            user = host['username']
+                            passwd = host['password']
+                            sslthumbprint = self.getsslThumbprint(host_ip)
+                            self.add_host(cluster=cluster_obj,
+                                          hostname=host_ip,
+                                          sslthumbprint=sslthumbprint,
+                                          username=user,
+                                          password=passwd)
+        except Exception as e:
+            print "Failed to create datacenter: %s" % e
+
+    def wait_for_task(self, task):
+
+        while task.info.state in (vim.TaskInfo.State.running,
+                                  vim.TaskInfo.State.queued):
+            time.sleep(2)
+
+        if task.info.state == vim.TaskInfo.State.success:
+            if task.info.result is not None:
+                out = 'Task completed successfully, result: %s' % (task.info.result,)
+                print out
+        elif task.info.state == vim.TaskInfo.State.error:
+            out = 'Error - Task did not complete successfully: %s' % (task.info.error,)
+            raise ValueError(out)
+        return task.info.result
+
+    def getsslThumbprint(self, ip):
+        p1 = subprocess.Popen(('echo', '-n'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        p2 = subprocess.Popen(('openssl', 's_client', '-connect', '{0}:443'.format(ip)),
+                              stdin=p1.stdout,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE
+                              )
+        p3 = subprocess.Popen(('openssl', 'x509', '-noout', '-fingerprint', '-sha1'),
+                              stdin=p2.stdout,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE
+                              )
+        out = p3.stdout.read()
+        ssl_thumbprint = out.split('=')[-1].strip()
+        return ssl_thumbprint
+
 
 if __name__ == '__main__':
     vc_object = Vcenter("10.x.x.x", "username", "password")
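[Editor's note - not part of the patch] A minimal driver for the new
create_datacenters() entry point, using a deployDataCenter.py-style dictionary;
every name, address and credential below is a placeholder:

    from marvin.lib.vcenter import Vcenter

    config = {
        "zones": [{
            "name": "ZONE1",
            "pods": [{
                "clusters": [{
                    "clustername": "DC1/CLUSTER1",
                    "hosts": [{
                        "url": "http://10.0.0.11",
                        "username": "root",
                        "password": "password",
                    }],
                }],
            }],
        }],
    }

    vc = Vcenter("10.0.0.1", "administrator@vsphere.local", "password")
    vc.create_datacenters(config)

Per the code above, create_datacenter() receives zone['name'], the cluster name
is the segment after the last '/' in 'clustername', and each host's address is
parsed from its 'url', so only these keys need to be present.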
" + "Source hypervisor type and version: " + srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " + destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion()); } From 50cbaf9f19a9179412478ec147c70dcf57763f59 Mon Sep 17 00:00:00 2001 From: sanjeevn Date: Mon, 7 Dec 2015 17:55:50 +0530 Subject: [PATCH 006/687] Marvin test to verify that adding TCP ports 500,4500 and 1701 in vpn should not fail Bug-Id: CS-43653 Reviewed-by: Self Made changes as per pavanb018 review comments --- test/integration/component/test_vpn_users.py | 75 +++++++++++++++++++- 1 file changed, 73 insertions(+), 2 deletions(-) diff --git a/test/integration/component/test_vpn_users.py b/test/integration/component/test_vpn_users.py index 72e10a4d6e63..3da7c8e7f2ea 100644 --- a/test/integration/component/test_vpn_users.py +++ b/test/integration/component/test_vpn_users.py @@ -29,13 +29,15 @@ Vpn, VpnUser, Configurations, - NATRule + NATRule, + FireWallRule ) from marvin.lib.common import (get_domain, get_zone, get_template ) -from marvin.lib.utils import cleanup_resources +from marvin.lib.utils import cleanup_resources, validateList +from marvin.codes import PASS class Services: @@ -451,3 +453,72 @@ def test_07_add_VPN_user_domain_admin(self): self.fail("Domain admin should be allowed to create VPN user: %s" % e) return + + @attr(tags=["advanced", "advancedns"], required_hardware="false") + def test_08_add_TCP_PF_Rule_In_VPN(self): + """ + Test to add TCP Port Forwarding rule for specific ports(500,1701 and 4500) in VPN + """ + # Steps for verification + # 1. Enable vpn on SourceNAT IP address + # 2. Configure PF with TCP ports 500,1701 and 4500. It should be allowed + # Should not conflict with UPD ports used for VPN + + vm_res = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id, + listall=True + ) + self.assertEqual( + validateList(vm_res)[0], + PASS, + "Failed to list virtual machine" + ) + network_id = vm_res[0].nic[0].networkid + src_nat_list = PublicIPAddress.list( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + listall=True, + issourcenat=True, + associatednetworkid=network_id + ) + self.assertEqual( + validateList(src_nat_list)[0], + PASS, + "Failed to list source nat ip address" + ) + ip = src_nat_list[0] + try: + vpn = Vpn.create( + self.apiclient, + publicipid=ip.id, + account=self.account.name, + domainid=self.account.domainid, + ) + self.assertIsNotNone( + vpn, + "Failed to create remote access vpn" + ) + except Exception as e: + self.fail("Failed to enable vpn on SourceNAT IP with error: %s" % e) + + #Create PF rule with TCP ports 500,4500 and 1701 + self.services['natrule']['protocol']="TCP" + for port in [500, 4500, 1701]: + self.services['natrule']['privateport'] = port + self.services['natrule']['publicport'] = port + try: + nat = NATRule.create( + self.apiclient, + self.virtual_machine, + self.services["natrule"], + ip.id + ) + self.assertIsNotNone( + nat, + "Failed to add PF rule with tcp parts matching vpn" + ) + except Exception as e: + self.fail("Creating PF rule for TCP port %s in VPN failed : %s" % (port, e)) + return From c92dcd2b32f6be4ef8915219698b9a4f3025cecc Mon Sep 17 00:00:00 2001 From: Manfred Touron Date: Sat, 14 May 2016 21:38:42 +0200 Subject: [PATCH 007/687] Compabitility fix for Docker >= 1.11 (docker/docker#19490) --- scripts/vm/systemvm/injectkeys.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/scripts/vm/systemvm/injectkeys.sh b/scripts/vm/systemvm/injectkeys.sh index 863ca6a4a304..9df1718253f2 100755 --- a/scripts/vm/systemvm/injectkeys.sh +++ b/scripts/vm/systemvm/injectkeys.sh @@ -86,7 +86,7 @@ systemvmpath=$3 command -v mkisofs > /dev/null || (echo "$(basename $0): mkisofs not found, please install or ensure PATH is accurate" ; exit 4) # if running into Docker as unprivileges, skip ssh verification as iso cannot be mounted due to missing loop device. -if [ -f /.dockerinit ]; then +if [ -f /.dockerenv ]; then if [ -e /dev/loop0 ]; then # it's a docker instance with privileges. inject_into_iso systemvm.iso $newpubkey From 62aa3b2bfaeb665ba960d8115cb93541bb42adc4 Mon Sep 17 00:00:00 2001 From: Will Stevens Date: Fri, 29 Jul 2016 10:11:34 -0400 Subject: [PATCH 008/687] Updating pom.xml version numbers for release 4.10.0-SNAPSHOT Signed-off-by: Will Stevens --- agent/pom.xml | 2 +- api/pom.xml | 2 +- client/pom.xml | 2 +- core/pom.xml | 2 +- debian/changelog | 2 +- developer/pom.xml | 2 +- engine/api/pom.xml | 2 +- engine/components-api/pom.xml | 2 +- engine/network/pom.xml | 2 +- engine/orchestration/pom.xml | 2 +- engine/pom.xml | 2 +- engine/schema/pom.xml | 2 +- engine/service/pom.xml | 2 +- engine/storage/cache/pom.xml | 2 +- engine/storage/datamotion/pom.xml | 2 +- engine/storage/image/pom.xml | 2 +- engine/storage/integration-test/pom.xml | 2 +- engine/storage/pom.xml | 2 +- engine/storage/snapshot/pom.xml | 2 +- engine/storage/volume/pom.xml | 2 +- framework/cluster/pom.xml | 2 +- framework/config/pom.xml | 2 +- framework/db/pom.xml | 2 +- framework/events/pom.xml | 2 +- framework/ipc/pom.xml | 2 +- framework/jobs/pom.xml | 2 +- framework/managed-context/pom.xml | 2 +- framework/pom.xml | 2 +- framework/quota/pom.xml | 2 +- framework/rest/pom.xml | 2 +- framework/security/pom.xml | 2 +- framework/spring/lifecycle/pom.xml | 2 +- framework/spring/module/pom.xml | 2 +- maven-standard/pom.xml | 2 +- plugins/acl/dynamic-role-based/pom.xml | 2 +- plugins/acl/static-role-based/pom.xml | 2 +- plugins/affinity-group-processors/explicit-dedication/pom.xml | 2 +- plugins/affinity-group-processors/host-anti-affinity/pom.xml | 2 +- plugins/alert-handlers/snmp-alerts/pom.xml | 2 +- plugins/alert-handlers/syslog-alerts/pom.xml | 2 +- plugins/api/discovery/pom.xml | 2 +- plugins/api/rate-limit/pom.xml | 2 +- plugins/api/solidfire-intg-test/pom.xml | 2 +- plugins/database/mysql-ha/pom.xml | 2 +- plugins/database/quota/pom.xml | 2 +- plugins/dedicated-resources/pom.xml | 2 +- plugins/deployment-planners/implicit-dedication/pom.xml | 2 +- plugins/deployment-planners/user-concentrated-pod/pom.xml | 2 +- plugins/deployment-planners/user-dispersing/pom.xml | 2 +- plugins/event-bus/inmemory/pom.xml | 2 +- plugins/event-bus/kafka/pom.xml | 2 +- plugins/event-bus/rabbitmq/pom.xml | 2 +- plugins/file-systems/netapp/pom.xml | 2 +- plugins/ha-planners/skip-heurestics/pom.xml | 2 +- plugins/host-allocators/random/pom.xml | 2 +- plugins/hypervisors/baremetal/pom.xml | 2 +- plugins/hypervisors/hyperv/pom.xml | 2 +- plugins/hypervisors/kvm/pom.xml | 2 +- plugins/hypervisors/ovm/pom.xml | 2 +- plugins/hypervisors/ovm3/pom.xml | 2 +- plugins/hypervisors/simulator/pom.xml | 2 +- plugins/hypervisors/ucs/pom.xml | 2 +- plugins/hypervisors/vmware/pom.xml | 2 +- plugins/hypervisors/xenserver/pom.xml | 2 +- plugins/network-elements/bigswitch/pom.xml | 2 +- plugins/network-elements/brocade-vcs/pom.xml | 2 +- plugins/network-elements/cisco-vnmc/pom.xml | 2 +- plugins/network-elements/dns-notifier/pom.xml | 2 +- 
plugins/network-elements/elastic-loadbalancer/pom.xml | 2 +- plugins/network-elements/f5/pom.xml | 2 +- plugins/network-elements/globodns/pom.xml | 2 +- plugins/network-elements/internal-loadbalancer/pom.xml | 2 +- plugins/network-elements/juniper-contrail/pom.xml | 2 +- plugins/network-elements/juniper-srx/pom.xml | 2 +- plugins/network-elements/midonet/pom.xml | 2 +- plugins/network-elements/netscaler/pom.xml | 2 +- plugins/network-elements/nicira-nvp/pom.xml | 4 ++-- plugins/network-elements/nuage-vsp/pom.xml | 2 +- plugins/network-elements/opendaylight/pom.xml | 2 +- plugins/network-elements/ovs/pom.xml | 2 +- plugins/network-elements/palo-alto/pom.xml | 2 +- plugins/network-elements/stratosphere-ssp/pom.xml | 2 +- plugins/network-elements/vxlan/pom.xml | 2 +- plugins/outofbandmanagement-drivers/ipmitool/pom.xml | 2 +- plugins/pom.xml | 2 +- plugins/storage-allocators/random/pom.xml | 2 +- plugins/storage/image/default/pom.xml | 2 +- plugins/storage/image/s3/pom.xml | 2 +- plugins/storage/image/sample/pom.xml | 2 +- plugins/storage/image/swift/pom.xml | 2 +- plugins/storage/volume/cloudbyte/pom.xml | 2 +- plugins/storage/volume/default/pom.xml | 2 +- plugins/storage/volume/nexenta/pom.xml | 2 +- plugins/storage/volume/sample/pom.xml | 2 +- plugins/storage/volume/solidfire/pom.xml | 2 +- plugins/user-authenticators/ldap/pom.xml | 2 +- plugins/user-authenticators/md5/pom.xml | 2 +- plugins/user-authenticators/pbkdf2/pom.xml | 2 +- plugins/user-authenticators/plain-text/pom.xml | 2 +- plugins/user-authenticators/saml2/pom.xml | 2 +- plugins/user-authenticators/sha256salted/pom.xml | 2 +- pom.xml | 2 +- quickcloud/pom.xml | 2 +- server/pom.xml | 2 +- services/console-proxy-rdp/rdpconsole/pom.xml | 2 +- services/console-proxy/plugin/pom.xml | 2 +- services/console-proxy/pom.xml | 2 +- services/console-proxy/server/pom.xml | 2 +- services/iam/plugin/pom.xml | 2 +- services/iam/server/pom.xml | 2 +- services/pom.xml | 2 +- services/secondary-storage/controller/pom.xml | 2 +- services/secondary-storage/pom.xml | 2 +- services/secondary-storage/server/pom.xml | 2 +- systemvm/pom.xml | 2 +- test/pom.xml | 2 +- tools/apidoc/pom.xml | 2 +- tools/checkstyle/pom.xml | 2 +- tools/devcloud-kvm/pom.xml | 2 +- tools/devcloud/pom.xml | 2 +- tools/devcloud4/pom.xml | 2 +- tools/marvin/pom.xml | 2 +- tools/marvin/setup.py | 2 +- tools/pom.xml | 2 +- tools/wix-cloudstack-maven-plugin/pom.xml | 2 +- usage/pom.xml | 2 +- utils/pom.xml | 2 +- vmware-base/pom.xml | 2 +- 128 files changed, 129 insertions(+), 129 deletions(-) diff --git a/agent/pom.xml b/agent/pom.xml index 9ff0c5e23cfc..51f70dba2a6b 100644 --- a/agent/pom.xml +++ b/agent/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/api/pom.xml b/api/pom.xml index 6b9c98f9c191..421e06c0afab 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/client/pom.xml b/client/pom.xml index 7310cef4f916..d68689297c84 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/core/pom.xml b/core/pom.xml index f50f8af7f9b5..ba404b043656 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/debian/changelog b/debian/changelog index 04ae4f75373a..fd268312e926 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -cloudstack 
(4.9.0-SNAPSHOT) unstable; urgency=low +cloudstack (4.10.0-SNAPSHOT) unstable; urgency=low [ Remi Bergsma ] * Update the version to 4.9.0.snapshot diff --git a/developer/pom.xml b/developer/pom.xml index b7d70004b064..9fe93157263e 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -18,7 +18,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/engine/api/pom.xml b/engine/api/pom.xml index 1f539fbd0453..6d358e8ec0cc 100644 --- a/engine/api/pom.xml +++ b/engine/api/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/engine/components-api/pom.xml b/engine/components-api/pom.xml index 7ea56d66dc50..ab7463dcc89a 100644 --- a/engine/components-api/pom.xml +++ b/engine/components-api/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/engine/network/pom.xml b/engine/network/pom.xml index a41ed26275cc..cccc0f7c1861 100644 --- a/engine/network/pom.xml +++ b/engine/network/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/engine/orchestration/pom.xml b/engine/orchestration/pom.xml index ee3bb34200e9..d0c97e61eea2 100755 --- a/engine/orchestration/pom.xml +++ b/engine/orchestration/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/engine/pom.xml b/engine/pom.xml index c3c0b28e76bf..68a9e6aa0036 100644 --- a/engine/pom.xml +++ b/engine/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index 4d0b90d6de0d..d63d24bb0b31 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/engine/service/pom.xml b/engine/service/pom.xml index 7f07323d48a0..8d47cae07bed 100644 --- a/engine/service/pom.xml +++ b/engine/service/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT cloud-engine-service war diff --git a/engine/storage/cache/pom.xml b/engine/storage/cache/pom.xml index 976a2e75a836..9418536ac5d8 100644 --- a/engine/storage/cache/pom.xml +++ b/engine/storage/cache/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/datamotion/pom.xml b/engine/storage/datamotion/pom.xml index 3324270198c6..5f4497aaccc7 100644 --- a/engine/storage/datamotion/pom.xml +++ b/engine/storage/datamotion/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/image/pom.xml b/engine/storage/image/pom.xml index 779f1399abdc..9c75670953b8 100644 --- a/engine/storage/image/pom.xml +++ b/engine/storage/image/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/integration-test/pom.xml b/engine/storage/integration-test/pom.xml index d64812957418..5f7a898d186d 100644 --- a/engine/storage/integration-test/pom.xml +++ b/engine/storage/integration-test/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/pom.xml b/engine/storage/pom.xml index cf9a8ea97372..4866632d18a0 100644 --- a/engine/storage/pom.xml +++ b/engine/storage/pom.xml 
@@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index eda47be41e2f..d0626ee8247c 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/engine/storage/volume/pom.xml b/engine/storage/volume/pom.xml index 340010ba34bd..c21148c9deb4 100644 --- a/engine/storage/volume/pom.xml +++ b/engine/storage/volume/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-engine - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/framework/cluster/pom.xml b/framework/cluster/pom.xml index d14e5c455d13..d1d95e67f568 100644 --- a/framework/cluster/pom.xml +++ b/framework/cluster/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/config/pom.xml b/framework/config/pom.xml index 349e9f29d3ad..f928863a5617 100644 --- a/framework/config/pom.xml +++ b/framework/config/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/db/pom.xml b/framework/db/pom.xml index 71b9af36b4d7..6882ccbfc05b 100644 --- a/framework/db/pom.xml +++ b/framework/db/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/events/pom.xml b/framework/events/pom.xml index a633a779fca5..91338730d28b 100644 --- a/framework/events/pom.xml +++ b/framework/events/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/ipc/pom.xml b/framework/ipc/pom.xml index 0341ff7dea49..1e0c0d9dad8b 100644 --- a/framework/ipc/pom.xml +++ b/framework/ipc/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/jobs/pom.xml b/framework/jobs/pom.xml index ff17f483580a..29252927d81b 100644 --- a/framework/jobs/pom.xml +++ b/framework/jobs/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/managed-context/pom.xml b/framework/managed-context/pom.xml index e2058fe03f1a..894f4bd5ab56 100644 --- a/framework/managed-context/pom.xml +++ b/framework/managed-context/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-maven-standard - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../maven-standard/pom.xml diff --git a/framework/pom.xml b/framework/pom.xml index a60841c75055..1dc3c7ecff03 100644 --- a/framework/pom.xml +++ b/framework/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT install diff --git a/framework/quota/pom.xml b/framework/quota/pom.xml index 82d43cdd4d5a..fc1b70bf7ae2 100644 --- a/framework/quota/pom.xml +++ b/framework/quota/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/rest/pom.xml b/framework/rest/pom.xml index 03aad768dcb1..94ded041e986 100644 --- a/framework/rest/pom.xml +++ b/framework/rest/pom.xml @@ -22,7 +22,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml cloud-framework-rest diff --git a/framework/security/pom.xml b/framework/security/pom.xml index cfe04de079c2..fcff26b261b8 100644 --- 
a/framework/security/pom.xml +++ b/framework/security/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-framework - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/framework/spring/lifecycle/pom.xml b/framework/spring/lifecycle/pom.xml index 93160c472c75..de892c57ab69 100644 --- a/framework/spring/lifecycle/pom.xml +++ b/framework/spring/lifecycle/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-maven-standard - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../maven-standard/pom.xml diff --git a/framework/spring/module/pom.xml b/framework/spring/module/pom.xml index 6f89f53530b0..f288b14fa858 100644 --- a/framework/spring/module/pom.xml +++ b/framework/spring/module/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-maven-standard - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../maven-standard/pom.xml diff --git a/maven-standard/pom.xml b/maven-standard/pom.xml index 6f8530a0f517..9480d813bd40 100644 --- a/maven-standard/pom.xml +++ b/maven-standard/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/plugins/acl/dynamic-role-based/pom.xml b/plugins/acl/dynamic-role-based/pom.xml index 96d7e53d1ce9..b049b44aff9c 100644 --- a/plugins/acl/dynamic-role-based/pom.xml +++ b/plugins/acl/dynamic-role-based/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/acl/static-role-based/pom.xml b/plugins/acl/static-role-based/pom.xml index 62ba16696ffd..b9073c144172 100644 --- a/plugins/acl/static-role-based/pom.xml +++ b/plugins/acl/static-role-based/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/affinity-group-processors/explicit-dedication/pom.xml b/plugins/affinity-group-processors/explicit-dedication/pom.xml index d4fc0f1a2c51..8bddb66d1550 100644 --- a/plugins/affinity-group-processors/explicit-dedication/pom.xml +++ b/plugins/affinity-group-processors/explicit-dedication/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/affinity-group-processors/host-anti-affinity/pom.xml b/plugins/affinity-group-processors/host-anti-affinity/pom.xml index 58e205d18b29..dc89ab746a84 100644 --- a/plugins/affinity-group-processors/host-anti-affinity/pom.xml +++ b/plugins/affinity-group-processors/host-anti-affinity/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/alert-handlers/snmp-alerts/pom.xml b/plugins/alert-handlers/snmp-alerts/pom.xml index 7cbca893fd52..5b1503ff8882 100644 --- a/plugins/alert-handlers/snmp-alerts/pom.xml +++ b/plugins/alert-handlers/snmp-alerts/pom.xml @@ -22,7 +22,7 @@ cloudstack-plugins org.apache.cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/plugins/alert-handlers/syslog-alerts/pom.xml b/plugins/alert-handlers/syslog-alerts/pom.xml index e4616acd072a..1d3acb498b18 100644 --- a/plugins/alert-handlers/syslog-alerts/pom.xml +++ b/plugins/alert-handlers/syslog-alerts/pom.xml @@ -22,7 +22,7 @@ cloudstack-plugins org.apache.cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml 4.0.0 diff --git a/plugins/api/discovery/pom.xml b/plugins/api/discovery/pom.xml index 049061abfaae..9551fa62ac84 100644 --- a/plugins/api/discovery/pom.xml +++ b/plugins/api/discovery/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 
4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/api/rate-limit/pom.xml b/plugins/api/rate-limit/pom.xml index 3e826c2b495d..1a82334d1e9a 100644 --- a/plugins/api/rate-limit/pom.xml +++ b/plugins/api/rate-limit/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/api/solidfire-intg-test/pom.xml b/plugins/api/solidfire-intg-test/pom.xml index fa5302b14b54..00b685700fb5 100644 --- a/plugins/api/solidfire-intg-test/pom.xml +++ b/plugins/api/solidfire-intg-test/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/database/mysql-ha/pom.xml b/plugins/database/mysql-ha/pom.xml index c275743cefa3..319fb15f42a9 100644 --- a/plugins/database/mysql-ha/pom.xml +++ b/plugins/database/mysql-ha/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/database/quota/pom.xml b/plugins/database/quota/pom.xml index b429c0ddd9e4..082fdd8a6557 100644 --- a/plugins/database/quota/pom.xml +++ b/plugins/database/quota/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/dedicated-resources/pom.xml b/plugins/dedicated-resources/pom.xml index 040e936d49ca..abab02355cee 100644 --- a/plugins/dedicated-resources/pom.xml +++ b/plugins/dedicated-resources/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/plugins/deployment-planners/implicit-dedication/pom.xml b/plugins/deployment-planners/implicit-dedication/pom.xml index f0f817ce9fb6..fd81dd55b6dd 100644 --- a/plugins/deployment-planners/implicit-dedication/pom.xml +++ b/plugins/deployment-planners/implicit-dedication/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/deployment-planners/user-concentrated-pod/pom.xml b/plugins/deployment-planners/user-concentrated-pod/pom.xml index d27188dcc878..5a71b01e545f 100644 --- a/plugins/deployment-planners/user-concentrated-pod/pom.xml +++ b/plugins/deployment-planners/user-concentrated-pod/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/deployment-planners/user-dispersing/pom.xml b/plugins/deployment-planners/user-dispersing/pom.xml index 413993f24f4e..d9ff3cfd87a5 100644 --- a/plugins/deployment-planners/user-dispersing/pom.xml +++ b/plugins/deployment-planners/user-dispersing/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/event-bus/inmemory/pom.xml b/plugins/event-bus/inmemory/pom.xml index efbf29086df4..8f284a260529 100644 --- a/plugins/event-bus/inmemory/pom.xml +++ b/plugins/event-bus/inmemory/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/event-bus/kafka/pom.xml b/plugins/event-bus/kafka/pom.xml index 2bae70344369..a6cf9486a481 100644 --- a/plugins/event-bus/kafka/pom.xml +++ b/plugins/event-bus/kafka/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/event-bus/rabbitmq/pom.xml b/plugins/event-bus/rabbitmq/pom.xml index 7e797099c050..2bbe15a8b93c 100644 --- 
a/plugins/event-bus/rabbitmq/pom.xml +++ b/plugins/event-bus/rabbitmq/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/file-systems/netapp/pom.xml b/plugins/file-systems/netapp/pom.xml index 2b2c7bd92313..0414c313417e 100644 --- a/plugins/file-systems/netapp/pom.xml +++ b/plugins/file-systems/netapp/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/ha-planners/skip-heurestics/pom.xml b/plugins/ha-planners/skip-heurestics/pom.xml index 1511f8da0d3f..3480f79c0aa1 100644 --- a/plugins/ha-planners/skip-heurestics/pom.xml +++ b/plugins/ha-planners/skip-heurestics/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/host-allocators/random/pom.xml b/plugins/host-allocators/random/pom.xml index 62284c229fa0..3c5a64cb7657 100644 --- a/plugins/host-allocators/random/pom.xml +++ b/plugins/host-allocators/random/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/baremetal/pom.xml b/plugins/hypervisors/baremetal/pom.xml index 6e569cf680be..e6b4a9160cf0 100755 --- a/plugins/hypervisors/baremetal/pom.xml +++ b/plugins/hypervisors/baremetal/pom.xml @@ -21,7 +21,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml cloud-plugin-hypervisor-baremetal diff --git a/plugins/hypervisors/hyperv/pom.xml b/plugins/hypervisors/hyperv/pom.xml index 165eaf6cf4d2..f6017c0fbc93 100644 --- a/plugins/hypervisors/hyperv/pom.xml +++ b/plugins/hypervisors/hyperv/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index f8100a50c067..f87f2ec68b8f 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/ovm/pom.xml b/plugins/hypervisors/ovm/pom.xml index e754e632a8da..584910aca205 100644 --- a/plugins/hypervisors/ovm/pom.xml +++ b/plugins/hypervisors/ovm/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/ovm3/pom.xml b/plugins/hypervisors/ovm3/pom.xml index 8da1430137cb..1dc6c1a3cc62 100644 --- a/plugins/hypervisors/ovm3/pom.xml +++ b/plugins/hypervisors/ovm3/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/simulator/pom.xml b/plugins/hypervisors/simulator/pom.xml index dbebba0146cf..58e712533f7e 100644 --- a/plugins/hypervisors/simulator/pom.xml +++ b/plugins/hypervisors/simulator/pom.xml @@ -22,7 +22,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml cloud-plugin-hypervisor-simulator diff --git a/plugins/hypervisors/ucs/pom.xml b/plugins/hypervisors/ucs/pom.xml index c8a14d19b07d..e763411cd492 100755 --- a/plugins/hypervisors/ucs/pom.xml +++ b/plugins/hypervisors/ucs/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml cloud-plugin-hypervisor-ucs diff --git a/plugins/hypervisors/vmware/pom.xml b/plugins/hypervisors/vmware/pom.xml 
index 455fb5819d63..9a4264bbe2f0 100644 --- a/plugins/hypervisors/vmware/pom.xml +++ b/plugins/hypervisors/vmware/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/hypervisors/xenserver/pom.xml b/plugins/hypervisors/xenserver/pom.xml index d03ae541a5c9..495a632173f0 100644 --- a/plugins/hypervisors/xenserver/pom.xml +++ b/plugins/hypervisors/xenserver/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/bigswitch/pom.xml b/plugins/network-elements/bigswitch/pom.xml index 05603a61fd09..3e93b37d59ae 100644 --- a/plugins/network-elements/bigswitch/pom.xml +++ b/plugins/network-elements/bigswitch/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/brocade-vcs/pom.xml b/plugins/network-elements/brocade-vcs/pom.xml index c0ed17c1449c..2f16048d598f 100644 --- a/plugins/network-elements/brocade-vcs/pom.xml +++ b/plugins/network-elements/brocade-vcs/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/cisco-vnmc/pom.xml b/plugins/network-elements/cisco-vnmc/pom.xml index 2c8bb7f7194b..74c77b8c0eb0 100644 --- a/plugins/network-elements/cisco-vnmc/pom.xml +++ b/plugins/network-elements/cisco-vnmc/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/dns-notifier/pom.xml b/plugins/network-elements/dns-notifier/pom.xml index 7db075f0aaff..f8ae5de82844 100644 --- a/plugins/network-elements/dns-notifier/pom.xml +++ b/plugins/network-elements/dns-notifier/pom.xml @@ -22,7 +22,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml cloud-plugin-example-dns-notifier diff --git a/plugins/network-elements/elastic-loadbalancer/pom.xml b/plugins/network-elements/elastic-loadbalancer/pom.xml index c52cc042debe..e867f5e353ea 100644 --- a/plugins/network-elements/elastic-loadbalancer/pom.xml +++ b/plugins/network-elements/elastic-loadbalancer/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/f5/pom.xml b/plugins/network-elements/f5/pom.xml index 54533e7aa76d..e41cb12fa51a 100644 --- a/plugins/network-elements/f5/pom.xml +++ b/plugins/network-elements/f5/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/globodns/pom.xml b/plugins/network-elements/globodns/pom.xml index 9d2c3547192c..8b3dedba017c 100644 --- a/plugins/network-elements/globodns/pom.xml +++ b/plugins/network-elements/globodns/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/internal-loadbalancer/pom.xml b/plugins/network-elements/internal-loadbalancer/pom.xml index 0eb15232540b..4b9e636ad831 100644 --- a/plugins/network-elements/internal-loadbalancer/pom.xml +++ b/plugins/network-elements/internal-loadbalancer/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/juniper-contrail/pom.xml 
b/plugins/network-elements/juniper-contrail/pom.xml index 13a3c5b6e829..c478360f7a59 100644 --- a/plugins/network-elements/juniper-contrail/pom.xml +++ b/plugins/network-elements/juniper-contrail/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/juniper-srx/pom.xml b/plugins/network-elements/juniper-srx/pom.xml index ba3ecd7a95d6..446ee9ef91fc 100644 --- a/plugins/network-elements/juniper-srx/pom.xml +++ b/plugins/network-elements/juniper-srx/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/midonet/pom.xml b/plugins/network-elements/midonet/pom.xml index 00ca4aef7c18..44e636d9b336 100644 --- a/plugins/network-elements/midonet/pom.xml +++ b/plugins/network-elements/midonet/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/netscaler/pom.xml b/plugins/network-elements/netscaler/pom.xml index d7b54e92e74f..52f44c8a9bd1 100644 --- a/plugins/network-elements/netscaler/pom.xml +++ b/plugins/network-elements/netscaler/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/nicira-nvp/pom.xml b/plugins/network-elements/nicira-nvp/pom.xml index 7ed0f235d6c0..0a1ce190c072 100644 --- a/plugins/network-elements/nicira-nvp/pom.xml +++ b/plugins/network-elements/nicira-nvp/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml @@ -34,7 +34,7 @@ org.apache.cloudstack cloud-utils - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT test-jar test diff --git a/plugins/network-elements/nuage-vsp/pom.xml b/plugins/network-elements/nuage-vsp/pom.xml index 7369ba5fa779..ae1ac5c73867 100644 --- a/plugins/network-elements/nuage-vsp/pom.xml +++ b/plugins/network-elements/nuage-vsp/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/opendaylight/pom.xml b/plugins/network-elements/opendaylight/pom.xml index a5f2a2678166..ee2781c91651 100644 --- a/plugins/network-elements/opendaylight/pom.xml +++ b/plugins/network-elements/opendaylight/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/ovs/pom.xml b/plugins/network-elements/ovs/pom.xml index 4f5adf9d65c1..5e4e0345e26f 100644 --- a/plugins/network-elements/ovs/pom.xml +++ b/plugins/network-elements/ovs/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/palo-alto/pom.xml b/plugins/network-elements/palo-alto/pom.xml index 555ef7ab3f2d..4d37294f406b 100644 --- a/plugins/network-elements/palo-alto/pom.xml +++ b/plugins/network-elements/palo-alto/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/network-elements/stratosphere-ssp/pom.xml b/plugins/network-elements/stratosphere-ssp/pom.xml index 89e84db27baa..e49da1bd0f0f 100644 --- a/plugins/network-elements/stratosphere-ssp/pom.xml +++ b/plugins/network-elements/stratosphere-ssp/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT 
../../pom.xml diff --git a/plugins/network-elements/vxlan/pom.xml b/plugins/network-elements/vxlan/pom.xml index 46e5af03ed9c..2a695d765483 100644 --- a/plugins/network-elements/vxlan/pom.xml +++ b/plugins/network-elements/vxlan/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/outofbandmanagement-drivers/ipmitool/pom.xml b/plugins/outofbandmanagement-drivers/ipmitool/pom.xml index be4cd86befeb..0c15fbbc6cbd 100644 --- a/plugins/outofbandmanagement-drivers/ipmitool/pom.xml +++ b/plugins/outofbandmanagement-drivers/ipmitool/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/pom.xml b/plugins/pom.xml index d05e63041137..990525c5339b 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/plugins/storage-allocators/random/pom.xml b/plugins/storage-allocators/random/pom.xml index 87cd5b582cdb..4921b0adb736 100644 --- a/plugins/storage-allocators/random/pom.xml +++ b/plugins/storage-allocators/random/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/storage/image/default/pom.xml b/plugins/storage/image/default/pom.xml index b9c641cfd9d8..dfa8d7881e50 100644 --- a/plugins/storage/image/default/pom.xml +++ b/plugins/storage/image/default/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/image/s3/pom.xml b/plugins/storage/image/s3/pom.xml index d7ee4372d05f..56cde071d89d 100644 --- a/plugins/storage/image/s3/pom.xml +++ b/plugins/storage/image/s3/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/image/sample/pom.xml b/plugins/storage/image/sample/pom.xml index c63929e79a69..6f35ef3533fe 100644 --- a/plugins/storage/image/sample/pom.xml +++ b/plugins/storage/image/sample/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/image/swift/pom.xml b/plugins/storage/image/swift/pom.xml index a6c7dd1164c7..7d2389fffe8a 100644 --- a/plugins/storage/image/swift/pom.xml +++ b/plugins/storage/image/swift/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/cloudbyte/pom.xml b/plugins/storage/volume/cloudbyte/pom.xml index 5372ab801344..c1ed51c12865 100755 --- a/plugins/storage/volume/cloudbyte/pom.xml +++ b/plugins/storage/volume/cloudbyte/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/default/pom.xml b/plugins/storage/volume/default/pom.xml index 1cba83893948..045b32e0dfe7 100644 --- a/plugins/storage/volume/default/pom.xml +++ b/plugins/storage/volume/default/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/nexenta/pom.xml b/plugins/storage/volume/nexenta/pom.xml index e4d8ebe069fd..fe3eb28e4f71 100644 --- a/plugins/storage/volume/nexenta/pom.xml +++ b/plugins/storage/volume/nexenta/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT 
+ 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/sample/pom.xml b/plugins/storage/volume/sample/pom.xml index d649df248ddf..6306d1c4f3c3 100644 --- a/plugins/storage/volume/sample/pom.xml +++ b/plugins/storage/volume/sample/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/storage/volume/solidfire/pom.xml b/plugins/storage/volume/solidfire/pom.xml index 21b3636ddbc9..37fea08804d2 100644 --- a/plugins/storage/volume/solidfire/pom.xml +++ b/plugins/storage/volume/solidfire/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../../pom.xml diff --git a/plugins/user-authenticators/ldap/pom.xml b/plugins/user-authenticators/ldap/pom.xml index 7b3b8f76e162..78131a623dd2 100644 --- a/plugins/user-authenticators/ldap/pom.xml +++ b/plugins/user-authenticators/ldap/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/md5/pom.xml b/plugins/user-authenticators/md5/pom.xml index f436f90e32e9..13b55116547e 100644 --- a/plugins/user-authenticators/md5/pom.xml +++ b/plugins/user-authenticators/md5/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/pbkdf2/pom.xml b/plugins/user-authenticators/pbkdf2/pom.xml index b26636bd7e01..c60b22e79aa9 100644 --- a/plugins/user-authenticators/pbkdf2/pom.xml +++ b/plugins/user-authenticators/pbkdf2/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/plain-text/pom.xml b/plugins/user-authenticators/plain-text/pom.xml index 3598cc168d6e..236811ec71a7 100644 --- a/plugins/user-authenticators/plain-text/pom.xml +++ b/plugins/user-authenticators/plain-text/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/saml2/pom.xml b/plugins/user-authenticators/saml2/pom.xml index 5e24df35c558..076a430a7878 100644 --- a/plugins/user-authenticators/saml2/pom.xml +++ b/plugins/user-authenticators/saml2/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/plugins/user-authenticators/sha256salted/pom.xml b/plugins/user-authenticators/sha256salted/pom.xml index fe5fa6f5eda9..c1f140ce6a41 100644 --- a/plugins/user-authenticators/sha256salted/pom.xml +++ b/plugins/user-authenticators/sha256salted/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-plugins - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/pom.xml b/pom.xml index ecce49ba1df8..21d8899b234d 100644 --- a/pom.xml +++ b/pom.xml @@ -30,7 +30,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT pom Apache CloudStack Apache CloudStack is an IaaS (“Infrastructure as a Service”) cloud orchestration platform. 
diff --git a/quickcloud/pom.xml b/quickcloud/pom.xml index 841a9c518555..45ea39744e74 100644 --- a/quickcloud/pom.xml +++ b/quickcloud/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-maven-standard - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../maven-standard/pom.xml diff --git a/server/pom.xml b/server/pom.xml index 32695094e68d..3edfedd84dfa 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/services/console-proxy-rdp/rdpconsole/pom.xml b/services/console-proxy-rdp/rdpconsole/pom.xml index 628d039a6e63..2dea1fe21d57 100755 --- a/services/console-proxy-rdp/rdpconsole/pom.xml +++ b/services/console-proxy-rdp/rdpconsole/pom.xml @@ -27,7 +27,7 @@ org.apache.cloudstack cloudstack-services - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/services/console-proxy/plugin/pom.xml b/services/console-proxy/plugin/pom.xml index dbaf85791c77..6aad3fdd05da 100644 --- a/services/console-proxy/plugin/pom.xml +++ b/services/console-proxy/plugin/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-service-console-proxy - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/console-proxy/pom.xml b/services/console-proxy/pom.xml index 9d5feec38689..dc25fb392ae2 100644 --- a/services/console-proxy/pom.xml +++ b/services/console-proxy/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-services - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/console-proxy/server/pom.xml b/services/console-proxy/server/pom.xml index 541d242013ef..0df07954b547 100644 --- a/services/console-proxy/server/pom.xml +++ b/services/console-proxy/server/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-service-console-proxy - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/iam/plugin/pom.xml b/services/iam/plugin/pom.xml index 974d2166bf68..156e9fb833a3 100644 --- a/services/iam/plugin/pom.xml +++ b/services/iam/plugin/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-service-iam - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/iam/server/pom.xml b/services/iam/server/pom.xml index b2ad0a4f52ec..d3e6d45feee4 100644 --- a/services/iam/server/pom.xml +++ b/services/iam/server/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-service-iam - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/pom.xml b/services/pom.xml index f315b5e8b1c0..de2027051a11 100644 --- a/services/pom.xml +++ b/services/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/secondary-storage/controller/pom.xml b/services/secondary-storage/controller/pom.xml index bb8878010f0b..89901a47440b 100644 --- a/services/secondary-storage/controller/pom.xml +++ b/services/secondary-storage/controller/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack-service-secondary-storage - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/secondary-storage/pom.xml b/services/secondary-storage/pom.xml index bce61d66f150..7e6c136c9ea1 100644 --- a/services/secondary-storage/pom.xml +++ b/services/secondary-storage/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-services - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/services/secondary-storage/server/pom.xml b/services/secondary-storage/server/pom.xml index 50155cfb610f..3c4d4a81b02f 100644 --- a/services/secondary-storage/server/pom.xml +++ b/services/secondary-storage/server/pom.xml @@ -23,7 
+23,7 @@ org.apache.cloudstack cloudstack-service-secondary-storage - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/systemvm/pom.xml b/systemvm/pom.xml index c72b56df6732..2a3ed05e6332 100644 --- a/systemvm/pom.xml +++ b/systemvm/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/test/pom.xml b/test/pom.xml index 3f1119f9cb4b..8c1ac9e012ba 100644 --- a/test/pom.xml +++ b/test/pom.xml @@ -23,7 +23,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/tools/apidoc/pom.xml b/tools/apidoc/pom.xml index 63af063bfc41..c6a2a9920617 100644 --- a/tools/apidoc/pom.xml +++ b/tools/apidoc/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloud-tools - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/tools/checkstyle/pom.xml b/tools/checkstyle/pom.xml index c0c39c956dab..9e0180c35eac 100644 --- a/tools/checkstyle/pom.xml +++ b/tools/checkstyle/pom.xml @@ -24,7 +24,7 @@ Apache CloudStack Developer Tools - Checkstyle Configuration org.apache.cloudstack checkstyle - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/tools/devcloud-kvm/pom.xml b/tools/devcloud-kvm/pom.xml index b0e12341c3d6..82e0d82b2afb 100644 --- a/tools/devcloud-kvm/pom.xml +++ b/tools/devcloud-kvm/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloud-tools - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/tools/devcloud/pom.xml b/tools/devcloud/pom.xml index 8644ff45a5a4..aa11a662a4d6 100644 --- a/tools/devcloud/pom.xml +++ b/tools/devcloud/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloud-tools - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/tools/devcloud4/pom.xml b/tools/devcloud4/pom.xml index e3cee0c2b8e6..bd5102eba51c 100644 --- a/tools/devcloud4/pom.xml +++ b/tools/devcloud4/pom.xml @@ -17,7 +17,7 @@ org.apache.cloudstack cloud-tools - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index 391987be4b54..ad429930ebe9 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloud-tools - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/tools/marvin/setup.py b/tools/marvin/setup.py index 7f3d9c8ca310..fbb483260965 100644 --- a/tools/marvin/setup.py +++ b/tools/marvin/setup.py @@ -27,7 +27,7 @@ raise RuntimeError("python setuptools is required to build Marvin") -VERSION = "4.9.0-SNAPSHOT" +VERSION = "4.10.0-SNAPSHOT" setup(name="Marvin", version=VERSION, diff --git a/tools/pom.xml b/tools/pom.xml index 02ad3185db2a..f94108e5cf3a 100644 --- a/tools/pom.xml +++ b/tools/pom.xml @@ -27,7 +27,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/tools/wix-cloudstack-maven-plugin/pom.xml b/tools/wix-cloudstack-maven-plugin/pom.xml index deb4284b3d22..2638fecfaf92 100644 --- a/tools/wix-cloudstack-maven-plugin/pom.xml +++ b/tools/wix-cloudstack-maven-plugin/pom.xml @@ -16,7 +16,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../../pom.xml diff --git a/usage/pom.xml b/usage/pom.xml index dd80e82ce775..bd1b95e210b6 100644 --- a/usage/pom.xml +++ b/usage/pom.xml @@ -15,7 +15,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT diff --git a/utils/pom.xml b/utils/pom.xml index 206eb1896a6a..2c8252cc2ad5 100755 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -26,7 +26,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT ../pom.xml diff --git a/vmware-base/pom.xml b/vmware-base/pom.xml index 
e18dc8ea145c..9c6a7eaed040 100644 --- a/vmware-base/pom.xml +++ b/vmware-base/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.9.0-SNAPSHOT + 4.10.0-SNAPSHOT From 546a3f8884398391760b76ddcf02e6bc1f30d642 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 4 Aug 2016 13:03:46 +0530 Subject: [PATCH 009/687] cloudstack: fix upgrade paths to 4.10.0 Signed-off-by: Rohit Yadav --- .../cloud/upgrade/DatabaseUpgradeChecker.java | 109 +++++++++--------- .../cloud/upgrade/dao/Upgrade490to4100.java | 66 +++++++++++ setup/db/db/schema-490to4100-cleanup.sql | 20 ++++ setup/db/db/schema-490to4100.sql | 20 ++++ 4 files changed, 162 insertions(+), 53 deletions(-) create mode 100644 engine/schema/src/com/cloud/upgrade/dao/Upgrade490to4100.java create mode 100644 setup/db/db/schema-490to4100-cleanup.sql create mode 100644 setup/db/db/schema-490to4100.sql diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index fc44c4f5e376..20ba0b3cfdd2 100644 --- a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -64,6 +64,7 @@ import com.cloud.upgrade.dao.Upgrade471to480; import com.cloud.upgrade.dao.Upgrade480to481; import com.cloud.upgrade.dao.Upgrade481to490; +import com.cloud.upgrade.dao.Upgrade490to4100; import com.cloud.upgrade.dao.UpgradeSnapshot217to224; import com.cloud.upgrade.dao.UpgradeSnapshot223to224; import com.cloud.upgrade.dao.VersionDao; @@ -111,7 +112,7 @@ public DatabaseUpgradeChecker() { new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), - new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.1.8", new DbUpgrade[] {new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), @@ -119,7 +120,7 @@ public DatabaseUpgradeChecker() { new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new 
Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.1.9", new DbUpgrade[] {new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), @@ -127,167 +128,169 @@ public DatabaseUpgradeChecker() { new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), - new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.1", new DbUpgrade[] {new Upgrade221to222(), new UpgradeSnapshot223to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), - new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.2", new DbUpgrade[] {new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.3", new DbUpgrade[] {new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new 
Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.4", new DbUpgrade[] {new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.5", new DbUpgrade[] {new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), - new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.6", new DbUpgrade[] {new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), 
new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.7", new DbUpgrade[] {new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), - new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.8", new DbUpgrade[] {new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30() , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.9", new DbUpgrade[] {new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), 
new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.10", new DbUpgrade[] {new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), - new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.12", new DbUpgrade[] {new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), - new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.13", new DbUpgrade[] {new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.14", new DbUpgrade[] {new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), - new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new 
Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("3.0.0", new DbUpgrade[] {new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), - new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("3.0.1", new DbUpgrade[] {new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("3.0.2", new DbUpgrade[] {new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), - new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.0.0", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.0.0", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.0.1", new DbUpgrade[] {new Upgrade40to41(), new 
Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.0.1", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.0.2", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.0.2", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.1.0", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.1.0", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.1.1", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.1.1", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.2.0", new DbUpgrade[] {new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new 
Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.2.0", new DbUpgrade[] {new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.2.1", new DbUpgrade[] {new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.2.1", new DbUpgrade[] {new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.3.0", new DbUpgrade[] {new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.3.0", new DbUpgrade[] {new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.3.1", new DbUpgrade[] {new Upgrade431to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.3.1", new DbUpgrade[] {new Upgrade431to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.3.2", new DbUpgrade[] {new Upgrade432to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.3.2", new DbUpgrade[] {new Upgrade432to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new 
Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.4.0", new DbUpgrade[] {new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.4.0", new DbUpgrade[] {new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.4.1", new DbUpgrade[] {new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490() }); + _upgradeMap.put("4.4.1", new DbUpgrade[] {new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100() }); - _upgradeMap.put("4.4.2", new DbUpgrade[] {new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.4.2", new DbUpgrade[] {new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.4.3", new DbUpgrade[] {new Upgrade443to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.4.3", new DbUpgrade[] {new Upgrade443to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.4.4", new DbUpgrade[] {new Upgrade444to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.4.4", new DbUpgrade[] {new Upgrade444to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.5.0", new DbUpgrade[] {new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.5.0", new DbUpgrade[] {new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new 
Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.5.1", new DbUpgrade[] {new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.5.1", new DbUpgrade[] {new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.5.2", new DbUpgrade[] {new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.5.2", new DbUpgrade[] {new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.5.3", new DbUpgrade[] {new Upgrade453to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.5.3", new DbUpgrade[] {new Upgrade453to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.6.0", new DbUpgrade[] {new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.6.0", new DbUpgrade[] {new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.6.1", new DbUpgrade[] {new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.6.1", new DbUpgrade[] {new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.6.2", new DbUpgrade[] {new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.6.2", new DbUpgrade[] {new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.7.0", new DbUpgrade[] {new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.7.0", new DbUpgrade[] {new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.7.1", new DbUpgrade[] {new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.7.1", new DbUpgrade[] {new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.7.2", new DbUpgrade[] {new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.7.2", new DbUpgrade[] {new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.8.0", new DbUpgrade[] {new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("4.8.0", new DbUpgrade[] {new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("4.8.1", new DbUpgrade[] {new 
Upgrade481to490()}); + _upgradeMap.put("4.8.1", new DbUpgrade[] {new Upgrade481to490(), new Upgrade490to4100()}); + + _upgradeMap.put("4.9.0", new DbUpgrade[] {new Upgrade490to4100()}); //CP Upgrades _upgradeMap.put("3.0.3", new DbUpgrade[] {new Upgrade303to304(), new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), - new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("3.0.4", new DbUpgrade[] {new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), - new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("3.0.5", new DbUpgrade[] {new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to421(), - new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("3.0.6", new DbUpgrade[] {new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), - new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), 
new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); - _upgradeMap.put("3.0.7", new DbUpgrade[] {new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + _upgradeMap.put("3.0.7", new DbUpgrade[] {new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.15", new DbUpgrade[] {new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to303(), new Upgrade303to304(), new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), - new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); _upgradeMap.put("2.2.16", new DbUpgrade[] {new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to303(), new Upgrade303to304(), new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), - new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490()}); + new Upgrade420to421(), new Upgrade421to430(), new Upgrade430to440(), new Upgrade440to441(), new Upgrade441to442(), new Upgrade442to450(), new Upgrade450to451(), new Upgrade451to452(), new Upgrade452to460(), new Upgrade460to461(), new Upgrade461to470(), new Upgrade470to471(), new Upgrade471to480(), new Upgrade480to481(), new Upgrade481to490(), new Upgrade490to4100()}); } protected void runScript(Connection conn, File file) { diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade490to4100.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade490to4100.java new file mode 100644 index 000000000000..b9976d16eb30 --- /dev/null +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade490to4100.java @@ -0,0 +1,66 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.upgrade.dao; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; +import org.apache.log4j.Logger; + +import java.io.File; +import java.sql.Connection; + +public class Upgrade490to4100 implements DbUpgrade { + final static Logger LOG = Logger.getLogger(Upgrade490to4100.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] {"4.9.0", "4.10.0"}; + } + + @Override + public String getUpgradedVersion() { + return "4.10.0"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-490to4100.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-490to4100.sql"); + } + return new File[] {new File(script)}; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public File[] getCleanupScripts() { + String script = Script.findScript("", "db/schema-490to4100-cleanup.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-490to4100-cleanup.sql"); + } + return new File[] {new File(script)}; + } +} diff --git a/setup/db/db/schema-490to4100-cleanup.sql b/setup/db/db/schema-490to4100-cleanup.sql new file mode 100644 index 000000000000..a12f63546b79 --- /dev/null +++ b/setup/db/db/schema-490to4100-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.9.0 to 4.10.0; +--; diff --git a/setup/db/db/schema-490to4100.sql b/setup/db/db/schema-490to4100.sql new file mode 100644 index 000000000000..48349a701ef5 --- /dev/null +++ b/setup/db/db/schema-490to4100.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.9.0 to 4.10.0; +--; From fd7273b446738c0ebfae84189502dbdcd18bfd42 Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Thu, 4 Aug 2016 15:31:51 +0530 Subject: [PATCH 010/687] server: give more memory to tests Increases the allowed max heap and permgen memory flags passed to the maven-surefire plugin. This fixes unit test failures in cloud-server. Signed-off-by: Rohit Yadav --- server/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/pom.xml b/server/pom.xml index 3edfedd84dfa..988a96ff0bb6 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -180,7 +180,7 @@ org.apache.maven.plugins maven-surefire-plugin - -Xmx1024m -Djava.security.egd=file:/dev/./urandom + -Xmx2048m -XX:MaxPermSize=512m -Djava.security.egd=file:/dev/./urandom %regex[.*[0-9]*To[0-9]*.*Test.*] com/cloud/upgrade/AdvanceZone223To224UpgradeTest From 4c387feebbffdb6724fffa28f67c65fdb82bcdc2 Mon Sep 17 00:00:00 2001 From: Rajani Karuturi Date: Fri, 5 Aug 2016 15:06:52 +0530 Subject: [PATCH 011/687] updated CONTRIBUTING.md As this is the doc that is shown while creating a pull request, updated it with information from the release principles wiki. --- CONTRIBUTING.md | 25 ++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c4ad1974eb63..6ac3ad56dd4d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,10 +3,26 @@ Contributing to Apache CloudStack (ACS) Summary ------- -This document covers how to contribute to the ACS project. These instructions assume you have a GitHub.com account, so if you don't have one you will have to create one. Your proposed code changes will be published to your own fork of the ACS project and you will submit a Pull Request for your changes to be added. +This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions. +These instructions assume you have a GitHub.com account, so if you don't have one you will have to create one. Your proposed code changes will be published to your own fork of the ACS project and you will submit a Pull Request for your changes to be added. _Let's get started!!!_ +Bug fixes +--------- + +It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches. +Therefore, a pull request (PR) that fixes a bug should be sent against a release branch. +This can be either the "current release" or the "previous release", depending on which ones are maintained. +Since the goal is a stable master, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> master (in other words: old to new; see the git sketch below). +Developing new features +----------------------- +Development should be done in a feature branch, branched off of master. +Send a PR (steps below) to get it into master (2x LGTM applies). +PR will only be merged when master is open, and will be held otherwise until master is open again. +No backporting / cherry-picking of features to existing branches!
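The merge-forward rule in the "Bug fixes" section above is easiest to see as plain git commands. The following is a minimal sketch, not part of the patch: the branch names `4.9` and `4.10`, the remote name `upstream`, and the bug fix branch `fix-CLOUDSTACK-XXXX` are assumptions chosen only for illustration.

``` bash
# Minimal sketch of the merge-forward flow, assuming 4.9 is the oldest
# maintained release branch and upstream points at the ASF repository.
git fetch upstream

# The fix lands on the oldest affected release branch first, via a merge
# rather than a cherry-pick, so the fix commit keeps its hash everywhere.
git checkout 4.9
git merge --no-ff fix-CLOUDSTACK-XXXX

# Merge forward, old to new: previous release -> current release -> master.
git checkout 4.10
git merge 4.9
git checkout master
git merge 4.10
```

Because the fix is merged rather than cherry-picked, the original commit hash is preserved in every branch, which is exactly the tracking property the section above asks for.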
Fork the code ------------- @@ -30,6 +46,7 @@ $ git rebase upstream/master Making changes -------------- + It is important that you create a new branch to make changes on and that you do not change the `master` branch (other than to rebase in changes from `upstream/master`). In this example I will assume you will be making your changes to a branch called `feature_x`. This `feature_x` branch will be created on your local repository and will be pushed to your forked repository on GitHub. Once this branch is on your fork you will create a Pull Request for the changes to be added to the ACS project. It is best practice to create a new branch each time you want to contribute to the project and only track the changes for that pull request in this branch. @@ -72,6 +89,8 @@ Make a GitHub Pull Request to contribute your changes When you are happy with your changes and you are ready to contribute them, you will create a Pull Request on GitHub to do so. This is done by pushing your local changes to your forked repository (default remote name is `origin`) and then initiating a pull request on GitHub. +Please include the JIRA id, detailed information about the bug/feature, which tests were executed, and how the reviewer can test this feature. In case of UI PRs, a screenshot is preferred. + > **IMPORTANT:** Make sure you have rebased your `feature_x` branch to include the latest code from `upstream/master` _before_ you do this. ``` bash @@ -105,3 +124,7 @@ $ git checkout master $ git branch -D feature_x $ git push origin :feature_x ``` + +Release Principles +------------------ +Detailed information about ACS release principles is available at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Release+principles+for+Apache+CloudStack+4.6+and+up From 98228c5fb1d5fd6eecd46eff631e630e4b192d08 Mon Sep 17 00:00:00 2001 From: Pierre-Luc Dion Date: Sat, 6 Aug 2016 14:38:20 -0400 Subject: [PATCH 012/687] add projectid to project details page --- ui/scripts/projects.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ui/scripts/projects.js b/ui/scripts/projects.js index f980f21ac615..bfed329765c0 100644 --- a/ui/scripts/projects.js +++ b/ui/scripts/projects.js @@ -1013,6 +1013,9 @@ label: 'label.display.name', isEditable: true }, + id: { + label: 'label.id' + }, domain: { label: 'label.domain' }, From d87828af2449907192dfd69a22fdb9cd5780d329 Mon Sep 17 00:00:00 2001 From: Rajani Karuturi Date: Mon, 8 Aug 2016 15:02:03 +0530 Subject: [PATCH 013/687] Fix debian build error due to commit 3315eb5420e1b2cedf906aeb709740efc2f08cfd --- debian/changelog | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/debian/changelog b/debian/changelog index c85c4a592ec3..b647b3e71283 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,10 +1,10 @@ cloudstack (4.10.0-SNAPSHOT) unstable; urgency=low -cloudstack (4.9.0) unstable; urgency=low - * Update the version to 4.9.0 + * Update the version to 4.10.0.snapshot - -- the Apache CloudStack project Mon, 25 Jul 2016 16:56:03 -0400 + -- the Apache CloudStack project Mon, 08 Aug 2016 11:22:34 +0530 +cloudstack (4.9.0) unstable; urgency=low [ Remi Bergsma ] * Update the version to 4.9.0.snapshot From 3cc3aea4492bdc7b571b5e459c14b4d5b4780e67 Mon Sep 17 00:00:00 2001 From: Pierre-Luc Dion Date: Mon, 8 Aug 2016 20:04:55 -0400 Subject: [PATCH 014/687] Add to project detail page: cpu, memory, template, storage and VMs count --- ui/scripts/projects.js | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/ui/scripts/projects.js
b/ui/scripts/projects.js index bfed329765c0..4f992ec4049b 100644 --- a/ui/scripts/projects.js +++ b/ui/scripts/projects.js @@ -1025,6 +1025,28 @@ state: { label: 'label.state' } + }, { + vmtotal: { + label: 'label.total.vms' + }, + memorytotal: { + label: 'label.memory.total' + }, + cputotal: { + label: 'label.total.cpu' + }, + volumetotal: { + label: 'label.volume' + }, + primarystoragetotal: { + label: 'label.primary.storage' + }, + iptotal: { + label: 'label.total.of.ip' + }, + templatetotal: { + label: 'label.template' + } }], tags: cloudStack.api.tags({ From 7a53feee22c873c4e35c86b3fc1d4647314b62bf Mon Sep 17 00:00:00 2001 From: Rohit Yadav Date: Tue, 28 Jun 2016 15:53:20 +0530 Subject: [PATCH 015/687] marvin: fix codegeneration against API discovery endpoint This makes the commands.xml based codegeneration equivalent to the API discovery end point based discovery. This fixes the fields that the (api discovery based) codegenerator should produce in the generated python classes (cmd and response classes per api/module). The issue was that the autogenerated cloudstackAPI differed between api-based and apidocs-based code generation. With this fix the generated classes match exactly thereby allowing us to go with either methods to generate cloudstackAPI. Signed-off-by: Rohit Yadav --- tools/marvin/marvin/codegenerator.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/marvin/marvin/codegenerator.py b/tools/marvin/marvin/codegenerator.py index ba2a3255d1b8..14f6d1380ae4 100644 --- a/tools/marvin/marvin/codegenerator.py +++ b/tools/marvin/marvin/codegenerator.py @@ -368,6 +368,7 @@ def constructResponseFromJSON(self, response): self.constructResponseFromJSON(innerResponse) paramProperty.subProperties.append(subProperty) paramProperty.type = response['type'] + paramProperty.dataType = response['type'] return paramProperty def loadCmdFromJSON(self, apiStream): @@ -404,13 +405,14 @@ def loadCmdFromJSON(self, apiStream): assert paramProperty.name if 'required' in param: - paramProperty.required = param['required'] + paramProperty.required = str(param['required']).lower() if 'description' in param: paramProperty.desc = param['description'] if 'type' in param: paramProperty.type = param['type'] + paramProperty.dataType = param['type'] csCmd.request.append(paramProperty) From fb7f5dfa37d76de66de52b0d8a10470b436fdf1a Mon Sep 17 00:00:00 2001 From: jeff Date: Wed, 24 Aug 2016 16:19:16 +0000 Subject: [PATCH 016/687] Export UUID for zone creation event completion. --- server/src/com/cloud/configuration/ConfigurationManagerImpl.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index f8a1fac90fd1..2dd590c6c3a7 100644 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -1770,6 +1770,7 @@ public DataCenterVO createZone(final long userId, final String zoneName, final S @Override public DataCenterVO doInTransaction(final TransactionStatus status) { final DataCenterVO zone = _zoneDao.persist(zoneFinal); + CallContext.current().putContextParameter(DataCenter.class, zone.getUuid()); if (domainId != null) { // zone is explicitly dedicated to this domain // create affinity group associated and dedicate the zone. 
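The one-line change above stores the new zone's UUID in the current call context, keyed by the entity class, so that the framework publishing the zone-creation completion event can pick it up on the same thread. A minimal sketch of that put/get pattern, using a hypothetical thread-local context rather than CloudStack's real CallContext class:

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for a per-request call context; CloudStack's actual
// CallContext carries more state, this only illustrates the parameter flow.
public class CallContextSketch {
    private static final ThreadLocal<Map<Object, Object>> PARAMS =
            ThreadLocal.withInitial(HashMap::new);

    public static void putContextParameter(Object key, Object value) {
        PARAMS.get().put(key, value);
    }

    public static Object getContextParameter(Object key) {
        return PARAMS.get().get(key);
    }

    public static void main(String[] args) {
        // Inside the zone-creation transaction: record the persisted zone's UUID.
        putContextParameter("DataCenter", "illustrative-zone-uuid");

        // Later, on the same thread, the event publisher can attach the UUID to the
        // zone-creation completion event instead of emitting it without one.
        System.out.println("entityuuid=" + getContextParameter("DataCenter"));
    }
}
```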
From 4297857e22946e9b4f421757188f07ab3f8e1efc Mon Sep 17 00:00:00 2001 From: nvazquez Date: Tue, 5 Jul 2016 15:21:26 -0700 Subject: [PATCH 017/687] CLOUDSTACK-9428: Fix for CLOUDSTACK-9211 - Improve performance --- api/src/com/cloud/vm/VmDetailConstants.java | 1 + .../vmware/resource/VmwareResource.java | 46 ++++++------- .../vmware/resource/VmwareResourceTest.java | 68 ++++++++++++++++--- 3 files changed, 80 insertions(+), 35 deletions(-) diff --git a/api/src/com/cloud/vm/VmDetailConstants.java b/api/src/com/cloud/vm/VmDetailConstants.java index d34afc13a169..c3c6db7a2965 100644 --- a/api/src/com/cloud/vm/VmDetailConstants.java +++ b/api/src/com/cloud/vm/VmDetailConstants.java @@ -23,4 +23,5 @@ public interface VmDetailConstants { public static final String NESTED_VIRTUALIZATION_FLAG = "nestedVirtualizationFlag"; public static final String HYPERVISOR_TOOLS_VERSION = "hypervisortoolsversion"; public static final String DATA_DISK_CONTROLLER = "dataDiskController"; + public static final String SVGA_VRAM_SIZE = "svga.vramSize"; } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 238ba3ed864a..9b7885a74e17 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -1946,6 +1946,9 @@ protected StartAnswer execute(StartCommand cmd) { vmConfigSpec.getExtraConfig().addAll( Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout))); + // config video card + configureVideoCard(vmMo, vmSpec, vmConfigSpec); + // // Configure VM // @@ -1964,8 +1967,6 @@ protected StartAnswer execute(StartCommand cmd) { postDiskConfigBeforeStart(vmMo, vmSpec, sortedDisks, ideControllerKey, scsiControllerKey, iqnToPath, hyperHost, context); - postVideoCardMemoryConfigBeforeStart(vmMo, vmSpec); - // // Power-on VM // @@ -2015,26 +2016,24 @@ protected StartAnswer execute(StartCommand cmd) { } /** - * Sets video card memory to the one provided in detail svga.vramSize (if provided). + * Sets video card memory to the one provided in detail svga.vramSize (if provided) on {@code vmConfigSpec}. * 64MB was always set before. * Size must be in KB. 
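* For example, svga.vramSize=131072 requests 131072 KB, i.e. 128 MB of video RAM, double the fixed 64MB that was previously always set.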
* @param vmMo virtual machine mo * @param vmSpec virtual machine specs + * @param vmConfigSpec virtual machine config spec + * @throws Exception exception */ - protected void postVideoCardMemoryConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec) { - String paramVRamSize = "svga.vramSize"; - if (vmSpec.getDetails().containsKey(paramVRamSize)){ - String value = vmSpec.getDetails().get(paramVRamSize); + protected void configureVideoCard(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, VirtualMachineConfigSpec vmConfigSpec) throws Exception { + if (vmSpec.getDetails().containsKey(VmDetailConstants.SVGA_VRAM_SIZE)){ + String value = vmSpec.getDetails().get(VmDetailConstants.SVGA_VRAM_SIZE); try { long svgaVmramSize = Long.parseLong(value); - setNewVRamSizeVmVideoCard(vmMo, svgaVmramSize); + setNewVRamSizeVmVideoCard(vmMo, svgaVmramSize, vmConfigSpec); } catch (NumberFormatException e){ s_logger.error("Unexpected value, cannot parse " + value + " to long due to: " + e.getMessage()); } - catch (Exception e){ - s_logger.error("Error while reconfiguring vm due to: " + e.getMessage()); - } } } @@ -2042,39 +2041,38 @@ protected void postVideoCardMemoryConfigBeforeStart(VirtualMachineMO vmMo, Virtu * Search for vm video card iterating through vm device list * @param vmMo virtual machine mo * @param svgaVmramSize new svga vram size (in KB) + * @param vmConfigSpec virtual machine config spec */ - private void setNewVRamSizeVmVideoCard(VirtualMachineMO vmMo, long svgaVmramSize) throws Exception { + protected void setNewVRamSizeVmVideoCard(VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) throws Exception { for (VirtualDevice device : vmMo.getAllDeviceList()){ if (device instanceof VirtualMachineVideoCard){ VirtualMachineVideoCard videoCard = (VirtualMachineVideoCard) device; - modifyVmVideoCardVRamSize(videoCard, vmMo, svgaVmramSize); + modifyVmVideoCardVRamSize(videoCard, vmMo, svgaVmramSize, vmConfigSpec); } } } /** - * Modifies vm vram size if it was set to a different size to the one provided in svga.vramSize (user_vm_details or template_vm_details) + * Modifies vm vram size if it was set to a different size to the one provided in svga.vramSize (user_vm_details or template_vm_details) on {@code vmConfigSpec} * @param videoCard vm's video card device * @param vmMo virtual machine mo * @param svgaVmramSize new svga vram size (in KB) + * @param vmConfigSpec virtual machine config spec */ - private void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize) throws Exception { + protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) { if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize){ s_logger.info("Video card memory was set " + videoCard.getVideoRamSizeInKB().longValue() + "kb instead of " + svgaVmramSize + "kb"); - VirtualMachineConfigSpec newSizeSpecs = configSpecVideoCardNewVRamSize(videoCard, svgaVmramSize); - boolean res = vmMo.configureVm(newSizeSpecs); - if (res) { - s_logger.info("Video card memory successfully updated to " + svgaVmramSize + "kb"); - } + configureSpecVideoCardNewVRamSize(videoCard, svgaVmramSize, vmConfigSpec); } } /** - * Returns a VirtualMachineConfigSpec to edit its svga vram size + * Add edit spec on {@code vmConfigSpec} to modify svga vram size * @param videoCard video card device to edit providing the svga vram size * @param svgaVmramSize new svga vram size (in KB) 
+ * @param vmConfigSpec virtual machine spec */ - private VirtualMachineConfigSpec configSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize){ + protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec){ videoCard.setVideoRamSizeInKB(svgaVmramSize); videoCard.setUseAutoDetect(false); @@ -2082,9 +2080,7 @@ private VirtualMachineConfigSpec configSpecVideoCardNewVRamSize(VirtualMachineVi arrayVideoCardConfigSpecs.setDevice(videoCard); arrayVideoCardConfigSpecs.setOperation(VirtualDeviceConfigSpecOperation.EDIT); - VirtualMachineConfigSpec changeVideoCardSpecs = new VirtualMachineConfigSpec(); - changeVideoCardSpecs.getDeviceChange().add(arrayVideoCardConfigSpecs); - return changeVideoCardSpecs; + vmConfigSpec.getDeviceChange().add(arrayVideoCardConfigSpecs); } private void tearDownVm(VirtualMachineMO vmMo) throws Exception{ diff --git a/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java index 22388a8a69cb..33e7cd2fc704 100644 --- a/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java +++ b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java @@ -19,13 +19,14 @@ import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.mockito.Mockito.mock; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.never; +import static org.mockito.Matchers.eq; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -34,8 +35,10 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; +import org.mockito.InOrder; import org.mockito.InjectMocks; import org.mockito.Mock; +import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.Spy; import org.powermock.api.mockito.PowerMockito; @@ -43,6 +46,7 @@ import org.powermock.modules.junit4.PowerMockRunner; import com.vmware.vim25.VirtualDevice; +import com.vmware.vim25.VirtualDeviceConfigSpec; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineVideoCard; import com.cloud.agent.api.Command; @@ -99,6 +103,10 @@ public VmwareHypervisorHost getHyperHost(VmwareContext context, Command cmd) { @Mock VirtualMachineTO vmSpec3dgpu; @Mock + VirtualMachineVideoCard videoCard; + @Mock + VirtualDevice virtualDevice; + @Mock DataTO srcDataTO; @Mock NfsTO srcDataNfsTO; @@ -107,9 +115,11 @@ public VmwareHypervisorHost getHyperHost(VmwareContext context, Command cmd) { private static final Integer NFS_VERSION = Integer.valueOf(3); private static final Integer NFS_VERSION_NOT_PRESENT = null; + private static final long VRAM_MEMORY_SIZE = 131072l; + private static final long VIDEO_CARD_MEMORY_SIZE = 65536l; @Before - public void setup() { + public void setup() throws Exception { MockitoAnnotations.initMocks(this); storageCmd = PowerMockito.mock(CopyCommand.class); doReturn(context).when(_resource).getServiceContext(null); @@ -117,6 +127,7 @@ public void setup() { when(storageCmd.getSrcTO()).thenReturn(srcDataTO); when(srcDataTO.getDataStore()).thenReturn(srcDataNfsTO); 
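// Shared fixtures: the copy command reads from an NFS-backed source (NFS version stubbed below) and the mocked video card reports the old fixed default of 64 MB (65536 KB).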
when(srcDataNfsTO.getNfsVersion()).thenReturn(NFS_VERSION); + when(videoCard.getVideoRamSizeInKB()).thenReturn(VIDEO_CARD_MEMORY_SIZE); } //Test successful scaling up the vm @@ -138,19 +149,56 @@ public void testScaleVMF1() throws Exception { } @Test - public void testStartVm3dgpuEnabled() throws Exception{ + public void testConfigureVideoCardSvgaVramProvided() throws Exception { Map specDetails = new HashMap(); - specDetails.put("svga.vramSize", "131072"); + specDetails.put("svga.vramSize", String.valueOf(VRAM_MEMORY_SIZE)); when(vmSpec3dgpu.getDetails()).thenReturn(specDetails); - VirtualMachineVideoCard videoCard = mock(VirtualMachineVideoCard.class); - when(videoCard.getVideoRamSizeInKB()).thenReturn(65536l); - when(vmMo3dgpu.getAllDeviceList()).thenReturn(Arrays.asList((VirtualDevice) videoCard)); + _resource.configureVideoCard(vmMo3dgpu, vmSpec3dgpu, vmConfigSpec); + verify(_resource).setNewVRamSizeVmVideoCard(vmMo3dgpu, VRAM_MEMORY_SIZE, vmConfigSpec); + } + + @Test + public void testConfigureVideoCardNotSvgaVramProvided() throws Exception { + _resource.configureVideoCard(vmMo3dgpu, vmSpec3dgpu, vmConfigSpec); + verify(_resource, never()).setNewVRamSizeVmVideoCard(vmMo3dgpu, VRAM_MEMORY_SIZE, vmConfigSpec); + } + + @Test + public void testModifyVmVideoCardVRamSizeDifferentVramSizes() { + _resource.modifyVmVideoCardVRamSize(videoCard, vmMo3dgpu, VRAM_MEMORY_SIZE, vmConfigSpec); + verify(_resource).configureSpecVideoCardNewVRamSize(videoCard, VRAM_MEMORY_SIZE, vmConfigSpec); + } + + @Test + public void testModifyVmVideoCardVRamSizeEqualSizes() { + _resource.modifyVmVideoCardVRamSize(videoCard, vmMo3dgpu, VIDEO_CARD_MEMORY_SIZE, vmConfigSpec); + verify(_resource, never()).configureSpecVideoCardNewVRamSize(videoCard, VIDEO_CARD_MEMORY_SIZE, vmConfigSpec); + } - when(vmMo3dgpu.configureVm(any(VirtualMachineConfigSpec.class))).thenReturn(true); + @Test + public void testSetNewVRamSizeVmVideoCardPresent() throws Exception { + when(vmMo3dgpu.getAllDeviceList()).thenReturn(Arrays.asList(videoCard, virtualDevice)); + _resource.setNewVRamSizeVmVideoCard(vmMo3dgpu, VRAM_MEMORY_SIZE, vmConfigSpec); + verify(_resource).modifyVmVideoCardVRamSize(videoCard, vmMo3dgpu, VRAM_MEMORY_SIZE, vmConfigSpec); + } - _resource.postVideoCardMemoryConfigBeforeStart(vmMo3dgpu, vmSpec3dgpu); - verify(vmMo3dgpu).configureVm(any(VirtualMachineConfigSpec.class)); + @Test + public void testSetNewVRamSizeVmVideoCardNotPresent() throws Exception { + when(vmMo3dgpu.getAllDeviceList()).thenReturn(Arrays.asList(virtualDevice)); + _resource.setNewVRamSizeVmVideoCard(vmMo3dgpu, VRAM_MEMORY_SIZE, vmConfigSpec); + verify(_resource, never()).modifyVmVideoCardVRamSize(any(VirtualMachineVideoCard.class), eq(vmMo3dgpu), eq(VRAM_MEMORY_SIZE), eq(vmConfigSpec)); + } + + @Test + public void testConfigureSpecVideoCardNewVRamSize() { + when(vmConfigSpec.getDeviceChange()).thenReturn(new ArrayList()); + _resource.configureSpecVideoCardNewVRamSize(videoCard, VRAM_MEMORY_SIZE, vmConfigSpec); + + InOrder inOrder = Mockito.inOrder(videoCard, vmConfigSpec); + inOrder.verify(videoCard).setVideoRamSizeInKB(VRAM_MEMORY_SIZE); + inOrder.verify(videoCard).setUseAutoDetect(false); + inOrder.verify(vmConfigSpec).getDeviceChange(); } // --------------------------------------------------------------------------------------------------- From 46df85c5bf14cd64a83e95e8ac035dadd89c0a2f Mon Sep 17 00:00:00 2001 From: Nathan Johnson Date: Wed, 17 Aug 2016 12:17:14 -0500 Subject: [PATCH 018/687] CLOUDSTACK-9461 This converts the rbd raw format on disk to 
qcow2 for compression. --- .../com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 29655d10784c..f11cb21084e1 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -737,10 +737,10 @@ public Answer backupSnapshot(final CopyCommand cmd) { final QemuImgFile srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(primaryPool.getSourceHost(), primaryPool.getSourcePort(), primaryPool.getAuthUserName(), primaryPool.getAuthSecret(), rbdSnapshot)); - srcFile.setFormat(PhysicalDiskFormat.RAW); + srcFile.setFormat(snapshotDisk.getFormat()); final QemuImgFile destFile = new QemuImgFile(snapshotFile); - destFile.setFormat(snapshotDisk.getFormat()); + destFile.setFormat(PhysicalDiskFormat.QCOW2); s_logger.debug("Backing up RBD snapshot " + rbdSnapshot + " to " + snapshotFile); final QemuImg q = new QemuImg(cmd.getWaitInMillSeconds()); From 2de5b0dc98148c8e447484b0190c9b5d27a66c35 Mon Sep 17 00:00:00 2001 From: nvazquez Date: Wed, 31 Aug 2016 18:26:43 -0700 Subject: [PATCH 019/687] CLOUDSTACK-9428: Add marvin test --- .../smoke/test_deploy_vgpu_enabled_vm.py | 180 ++++++++++++++---- 1 file changed, 144 insertions(+), 36 deletions(-) diff --git a/test/integration/smoke/test_deploy_vgpu_enabled_vm.py b/test/integration/smoke/test_deploy_vgpu_enabled_vm.py index c9eb7672e2dc..d49ab08d4849 100644 --- a/test/integration/smoke/test_deploy_vgpu_enabled_vm.py +++ b/test/integration/smoke/test_deploy_vgpu_enabled_vm.py @@ -24,21 +24,20 @@ # base - contains all resources as entities and defines create, delete, # list operations on them -from marvin.lib.base import Account, VirtualMachine, ServiceOffering +from marvin.lib.base import Account, VirtualMachine, ServiceOffering, NetworkOffering, Network, Template # utils - utility classes for common cleanup, external library wrappers etc -from marvin.lib.utils import cleanup_resources +from marvin.lib.utils import cleanup_resources, get_hypervisor_type, validateList # common - commonly used methods for all tests are listed here from marvin.lib.common import get_zone, get_domain, get_template, list_hosts from marvin.sshClient import SshClient -from marvin.codes import FAILED +from marvin.codes import FAILED, PASS from nose.plugins.attrib import attr - class TestDeployvGPUenabledVM(cloudstackTestCase): """ @@ -100,48 +99,76 @@ def setUpClass(self): def setUp(self): self.testdata = self.testClient.getParsedTestDataConfig()["vgpu"] self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() if self.noSuitableHost or self.unsupportedHypervisor: - self.skipTest("Skipping test because suitable hypervisor/host not\ - present") + self.hypervisor = get_hypervisor_type(self.apiclient) + if self.hypervisor.lower() not in ["vmware"]: + self.skipTest("Skipping test because suitable hypervisor/host not\ + present") + self.testdata = self.testClient.getParsedTestDataConfig() # Get Zone, Domain and Default Built-in template self.domain = get_domain(self.apiclient) - self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) - self.testdata["mode"] = self.zone.networktype - # Before running this test, register a windows template with ostype as 
- # 'Windows 7 (32-bit)' + self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) + + # Before running this test for Xen Server, register a windows template with ostype as + # 'Windows 7 (32-bit)' self.template = get_template( - self.apiclient, - self.zone.id, - self.testdata["ostype"]) + self.apiclient, + self.zone.id, + self.testdata["ostype"]) - if self.template == FAILED: - assert False, "get_template() failed to return template with description %s" % self.testdata[ - "ostype"] - # create a user account + # create a user account self.account = Account.create( - self.apiclient, - self.testdata["account"], - domainid=self.domain.id + self.apiclient, + self.testdata["account"], + domainid=self.domain.id ) + self.cleanup = [] - self.testdata["small"]["zoneid"] = self.zone.id - self.testdata["small"]["template"] = self.template.id + if self.hypervisor.lower() in ["xenserver"]: + self.testdata["mode"] = self.zone.networktype - self.testdata["service_offerings"]["vgpu260qwin"]["serviceofferingdetails"] = [ - { - 'pciDevice': 'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}, { - 'vgpuType': 'GRID K120Q'}] - # create a service offering - self.service_offering = ServiceOffering.create( - self.apiclient, - self.testdata["service_offerings"]["vgpu260qwin"], - ) - # build cleanup list - self.cleanup = [ - self.service_offering, - self.account - ] + if self.template == FAILED: + assert False, "get_template() failed to return template with description %s" % self.testdata[ + "ostype"] + + self.testdata["small"]["zoneid"] = self.zone.id + self.testdata["small"]["template"] = self.template.id + + self.testdata["service_offerings"]["vgpu260qwin"]["serviceofferingdetails"] = [ + { + 'pciDevice': 'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}, { + 'vgpuType': 'GRID K120Q'}] + # create a service offering + self.service_offering = ServiceOffering.create( + self.apiclient, + self.testdata["service_offerings"]["vgpu260qwin"], + ) + # build cleanup list + self.cleanup = [ + self.service_offering, + self.account + ] + elif self.hypervisor.lower() in ["vmware"]: + self.testdata["isolated_network"]["zoneid"] = self.zone.id + self.userapiclient = self.testClient.getUserApiClient( + UserName=self.account.name, + DomainName=self.account.domain + ) + self.service_offering = ServiceOffering.create( + self.apiclient, + self.testdata["service_offering"]) + self.cleanup.append(self.service_offering) + + # Create Shared Network Offering + self.isolated_network_offering = NetworkOffering.create( + self.apiclient, + self.testdata["isolated_network_offering"]) + self.cleanup.append(self.isolated_network_offering) + # Enable Isolated Network offering + self.isolated_network_offering.update(self.apiclient, state='Enabled') + @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") def test_deploy_vgpu_enabled_vm(self): @@ -152,6 +179,10 @@ def test_deploy_vgpu_enabled_vm(self): # 2. Virtual Machine is vGPU enabled (via SSH) # 3. listVirtualMachines returns accurate information """ + + if self.hypervisor.lower() not in ["xenserver"]: + self.skipTest("This test case is written specifically\ + for XenServer hypervisor") self.virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["small"], @@ -228,3 +259,80 @@ def tearDown(self): cleanup_resources(self.apiclient, self.cleanup) except Exception as e: self.debug("Warning! Exception in tearDown: %s" % e) + + @attr(tags=["advanced"]) + def test_3d_gpu_support(self): + """ + + # 1. 
Register a template for VMware with nicAdapter vmxnet3 and 3D GPU details + # 2. Deploy a VM using this template + # 3. Create an isolated network + # 4. Add network to VM + # 5. Verify vm details for 3D GPU details + """ + + if self.hypervisor.lower() not in ["vmware"]: + self.skipTest("This test case is written specifically\ + for Vmware hypervisor") + + # Register a private template in the account with nic adapter vmxnet3 + # Also add required 3D GPU details for enabling it + template = Template.register( + self.userapiclient, + self.testdata["configurableData"]["vmxnet3template"], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + details=[{"mks.enable3d" : "true", "mks.use3dRenderer" : "automatic", + "svga.autodetect" : "false", "svga.vramSize" : "131072"}] + ) + self.cleanup.append(template) + template.download(self.apiclient) + + templates = Template.list( + self.userapiclient, + listall=True, + id=template.id, + templatefilter="self" + ) + + self.assertEqual( + validateList(templates)[0], + PASS, + "Templates list validation failed" + ) + + self.testdata["virtual_machine"]["zoneid"] = self.zone.id + self.testdata["virtual_machine"]["template"] = template.id + + virtual_machine = VirtualMachine.create( + self.apiclient, + self.testdata["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + templateid=template.id, + serviceofferingid=self.service_offering.id) + + isolated_network = Network.create( + self.apiclient, + self.testdata["isolated_network"], + self.account.name, + self.account.domainid, + networkofferingid=self.isolated_network_offering.id) + + virtual_machine.add_nic(self.apiclient, isolated_network.id) + + qresultset = self.dbclient.execute("select id from vm_instance where uuid = '%s';" % virtual_machine.id) + vm_id = qresultset[0] + qresultset = self.dbclient.execute("select name, value from user_vm_details where vm_id = '%d';" % vm_id) + detailKeys = [x[0] for x in qresultset] + + self.assertTrue('mks.enable3d' in detailKeys and 'mks.use3dRenderer' in detailKeys and 'svga.autodetect' in detailKeys and 'svga.vramSize' in detailKeys, "VM details do not contain 3D GPU details") + + self.assertEquals('true', qresultset[detailKeys.index('mks.enable3d')][1], "Expected detail 'mks.enable3d'='true'") + + self.assertEquals('automatic', qresultset[detailKeys.index('mks.use3dRenderer')][1], "Expected detail 'mks.use3dRenderer'='automatic'") + + self.assertEquals('false', qresultset[detailKeys.index('svga.autodetect')][1], "Expected detail 'svga.autodetect'='false'") + + self.assertEquals('131072', qresultset[detailKeys.index('svga.vramSize')][1], "Expected detail 'svga.vramSize'='131072'") From 8463fb153a0b439e7b306e380c6e5ce942663875 Mon Sep 17 00:00:00 2001 From: Milamber Date: Sun, 4 Sep 2016 09:34:15 +0100 Subject: [PATCH 020/687] In comment, Add missing packages for Docker Ubuntu builds --- packaging/build-deb.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/build-deb.sh b/packaging/build-deb.sh index aa17dc51c9fa..52af16a91475 100755 --- a/packaging/build-deb.sh +++ b/packaging/build-deb.sh @@ -34,10 +34,10 @@ set -e # Assume that the cloudstack source is present in /tmp/cloudstack # # Ubuntu 16.04 -# docker run -ti -v /tmp:/src ubuntu:16.04 /bin/bash -c "apt-get update && apt-get install -y dpkg-dev python debhelper openjdk-8-jdk genisoimage python-mysql.connector maven lsb-release devscripts && /src/cloudstack/packaging/build-deb.sh" +# docker run -ti -v /tmp:/src 
ubuntu:16.04 /bin/bash -c "apt-get update && apt-get install -y dpkg-dev python debhelper openjdk-8-jdk genisoimage python-mysql.connector maven lsb-release devscripts dh-systemd python-setuptools && /src/cloudstack/packaging/build-deb.sh" # # Ubuntu 14.04 -# docker run -ti -v /tmp:/src ubuntu:14.04 /bin/bash -c "apt-get update && apt-get install -y dpkg-dev python debhelper openjdk-7-jdk genisoimage python-mysql.connector maven lsb-release devscripts && /src/cloudstack/packaging/build-deb.sh" +# docker run -ti -v /tmp:/src ubuntu:14.04 /bin/bash -c "apt-get update && apt-get install -y dpkg-dev python debhelper openjdk-7-jdk genisoimage python-mysql.connector maven lsb-release devscripts dh-systemd python-setuptools && /src/cloudstack/packaging/build-deb.sh" # cd `dirname $0` From 1384d748a7296019f0d8ba2a5fcf4b97090f2e04 Mon Sep 17 00:00:00 2001 From: nvazquez Date: Fri, 20 May 2016 13:01:03 -0300 Subject: [PATCH 021/687] CLOUDSTACK-9386: Find vm on datacenter instead of randomly choosing a cluster --- .../vmware/resource/VmwareResource.java | 31 ++++++++++------ .../vmware/resource/VmwareResourceTest.java | 35 +++++++++++++++++-- 2 files changed, 54 insertions(+), 12 deletions(-) diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 9b7885a74e17..a6db828f22fc 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -5535,17 +5535,8 @@ public Answer execute(DestroyCommand cmd) { VmwareHypervisorHost hyperHost = getHyperHost(context, null); VolumeTO vol = cmd.getVolume(); - ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, vol.getPoolUuid()); - if (morDs == null) { - String msg = "Unable to find datastore based on volume mount point " + vol.getMountPoint(); - s_logger.error(msg); - throw new Exception(msg); - } - - ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); - ClusterMO clusterMo = new ClusterMO(context, morCluster); + VirtualMachineMO vmMo = findVmOnDatacenter(context, hyperHost, vol); - VirtualMachineMO vmMo = clusterMo.findVmOnHyperHost(vol.getPath()); if (vmMo != null && vmMo.isTemplate()) { if (s_logger.isInfoEnabled()) { s_logger.info("Destroy template volume " + vol.getPath()); @@ -5570,6 +5561,26 @@ public Answer execute(DestroyCommand cmd) { } } + /** + * Use data center to look for vm, instead of randomly picking up a cluster
+ * (in multiple cluster environments vm could not be found if wrong cluster was chosen) + * @param context vmware context + * @param hyperHost vmware hv host + * @param vol volume + * @return a virtualmachinemo if could be found on datacenter. + * @throws Exception if there is an error while finding vm + * @throws CloudRuntimeException if datacenter cannot be found + */ + protected VirtualMachineMO findVmOnDatacenter(VmwareContext context, VmwareHypervisorHost hyperHost, VolumeTO vol) throws Exception { + DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter()); + if (dcMo.getMor() == null) { + String msg = "Unable to find VMware DC"; + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + return dcMo.findVm(vol.getPath()); + } + private String getAbsoluteVmdkFile(VirtualDisk disk) { String vmdkAbsFile = null; VirtualDeviceBackingInfo backingInfo = disk.getBacking(); diff --git a/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java index 33e7cd2fc704..efaf6d28d8bf 100644 --- a/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java +++ b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java @@ -27,6 +27,8 @@ import static org.mockito.Matchers.eq; import java.util.ArrayList; +import static org.powermock.api.mockito.PowerMockito.whenNew; + import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -45,6 +47,7 @@ import org.powermock.core.classloader.annotations.PrepareForTest; import org.powermock.modules.junit4.PowerMockRunner; +import com.vmware.vim25.ManagedObjectReference; import com.vmware.vim25.VirtualDevice; import com.vmware.vim25.VirtualDeviceConfigSpec; import com.vmware.vim25.VirtualMachineConfigSpec; @@ -55,16 +58,22 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.hypervisor.vmware.mo.DatacenterMO; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.storage.resource.VmwareStorageProcessor; import com.cloud.storage.resource.VmwareStorageSubsystemCommandHandler; +import com.cloud.utils.exception.CloudRuntimeException; + @RunWith(PowerMockRunner.class) -@PrepareForTest(CopyCommand.class) +@PrepareForTest({CopyCommand.class, DatacenterMO.class, VmwareResource.class}) public class VmwareResourceTest { + private static final String VOLUME_PATH = "XXXXXXXXXXXX"; + @Mock VmwareStorageProcessor storageProcessor; @Mock @@ -110,6 +119,12 @@ public VmwareHypervisorHost getHyperHost(VmwareContext context, Command cmd) { DataTO srcDataTO; @Mock NfsTO srcDataNfsTO; + @Mock + VolumeTO volume; + @Mock + ManagedObjectReference mor; + @Mock + DatacenterMO datacenter; CopyCommand storageCmd; @@ -128,6 +143,7 @@ public void setup() throws Exception { when(srcDataTO.getDataStore()).thenReturn(srcDataNfsTO); when(srcDataNfsTO.getNfsVersion()).thenReturn(NFS_VERSION); when(videoCard.getVideoRamSizeInKB()).thenReturn(VIDEO_CARD_MEMORY_SIZE); + when(volume.getPath()).thenReturn(VOLUME_PATH); } //Test successful scaling up the vm @@ -258,4 +274,19 @@ public void checkStorageProcessorAndHandlerNfsVersionAttributeVersionSet(){ verify(_resource, 
never()).examineStorageSubSystemCommandNfsVersion(storageCmd); } -} \ No newline at end of file + @Test(expected=CloudRuntimeException.class) + public void testFindVmOnDatacenterNullHyperHostReference() throws Exception { + when(hyperHost.getMor()).thenReturn(null); + _resource.findVmOnDatacenter(context, hyperHost, volume); + } + + @Test + public void testFindVmOnDatacenter() throws Exception { + when(hyperHost.getHyperHostDatacenter()).thenReturn(mor); + when(datacenter.getMor()).thenReturn(mor); + when(datacenter.findVm(VOLUME_PATH)).thenReturn(vmMo); + whenNew(DatacenterMO.class).withArguments(context, mor).thenReturn(datacenter); + VirtualMachineMO result = _resource.findVmOnDatacenter(context, hyperHost, volume); + assertEquals(vmMo, result); + } +} From 75f89c7676ac04821436e4b1bff820a17797bf46 Mon Sep 17 00:00:00 2001 From: Bharat Kumar Date: Thu, 6 Aug 2015 14:15:48 +0530 Subject: [PATCH 022/687] CLOUDSTACK-8751 Minimise network downtime during network updates when redundant VR is being used. database schema changes Made changes to the updateNetwork API. --- api/src/com/cloud/network/Network.java | 2 + api/src/com/cloud/network/NetworkService.java | 2 +- .../network/element/RedundantResource.java | 11 + .../cloud/network/router/VirtualRouter.java | 4 + .../apache/cloudstack/api/ApiConstants.java | 1 + .../network/UpdateNetworkCmdByAdmin.java | 2 +- .../user/network/UpdateNetworkCmd.java | 12 +- .../service/NetworkOrchestrationService.java | 6 + .../orchestration/NetworkOrchestrator.java | 53 +++- .../src/com/cloud/vm/DomainRouterVO.java | 14 + .../com/cloud/network/NetworkServiceImpl.java | 252 ++++++++++-------- .../network/element/VirtualRouterElement.java | 98 ++++++- .../VirtualNetworkApplianceManagerImpl.java | 5 + .../RouterDeploymentDefinition.java | 12 +- .../RouterDeploymentDefinitionBuilder.java | 4 + .../com/cloud/vpc/MockNetworkManagerImpl.java | 17 +- .../RouterDeploymentDefinitionTest.java | 2 +- .../RouterDeploymentDefinitionTestBase.java | 3 + setup/db/db/schema-452to460.sql | 2 + 19 files changed, 377 insertions(+), 125 deletions(-) create mode 100644 api/src/com/cloud/network/element/RedundantResource.java diff --git a/api/src/com/cloud/network/Network.java b/api/src/com/cloud/network/Network.java index 7cc5441603ae..81447d6457c5 100644 --- a/api/src/com/cloud/network/Network.java +++ b/api/src/com/cloud/network/Network.java @@ -41,6 +41,8 @@ public enum GuestType { Shared, Isolated } + public String updatingInSequence ="updatingInSequence"; + public static class Service { private static List supportedServices = new ArrayList(); diff --git a/api/src/com/cloud/network/NetworkService.java b/api/src/com/cloud/network/NetworkService.java index c1b68eb3a463..e26db340ff4f 100644 --- a/api/src/com/cloud/network/NetworkService.java +++ b/api/src/com/cloud/network/NetworkService.java @@ -77,7 +77,7 @@ IpAddress allocatePortableIP(Account ipOwner, int regionId, Long zoneId, Long ne IpAddress getIp(long id); Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, User callerUser, String domainSuffix, Long networkOfferingId, - Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID); + Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID, boolean updateInSequence); PhysicalNetwork createPhysicalNetwork(Long zoneId, String vnetRange, String networkSpeed, List isolationMethods, String broadcastDomainRange, Long domainId, List tags, String name); diff --git 
a/api/src/com/cloud/network/element/RedundantResource.java b/api/src/com/cloud/network/element/RedundantResource.java new file mode 100644 index 000000000000..863c9cd330c5 --- /dev/null +++ b/api/src/com/cloud/network/element/RedundantResource.java @@ -0,0 +1,11 @@ +package com.cloud.network.element; + +import com.cloud.network.Network; + +/** + * Created by bharat on 11/08/15. + */ +public interface RedundantResource { + public void configureResource(Network network); + public int getResourceCount(Network network); +} diff --git a/api/src/com/cloud/network/router/VirtualRouter.java b/api/src/com/cloud/network/router/VirtualRouter.java index 0114a962146f..060ef0fa1416 100644 --- a/api/src/com/cloud/network/router/VirtualRouter.java +++ b/api/src/com/cloud/network/router/VirtualRouter.java @@ -26,6 +26,10 @@ public enum Role { VIRTUAL_ROUTER, LB, INTERNAL_LB_VM } + public enum UpdateState { + UPDATE_NEEDED, UPDATE_IN_PROGRESS, UPDATE_COMPLETE, UPDATE_FAILED + } + Role getRole(); boolean getIsRedundantRouter(); diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index 1d0b4a3756a7..0e838490eb08 100644 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -272,6 +272,7 @@ public class ApiConstants { public static final String USERNAME = "username"; public static final String USER_SECURITY_GROUP_LIST = "usersecuritygrouplist"; public static final String USE_VIRTUAL_NETWORK = "usevirtualnetwork"; + public static final String Update_IN_SEQUENCE ="updateinsequence"; public static final String VALUE = "value"; public static final String VIRTUAL_MACHINE_ID = "virtualmachineid"; public static final String VIRTUAL_MACHINE_IDS = "virtualmachineids"; diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java b/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java index 269f43ec9596..f2c5119466cc 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java @@ -49,7 +49,7 @@ public void execute() throws InsufficientCapacityException, ConcurrentOperationE } Network result = _networkService.updateGuestNetwork(getId(), getNetworkName(), getDisplayText(), callerAccount, - callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId()); + callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId(), getUpdateInSequence()); if (result != null) { diff --git a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java index 921e74b67b2c..8ef9251e0475 100644 --- a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java @@ -75,6 +75,9 @@ public class UpdateNetworkCmd extends BaseAsyncCustomIdCmd { @Parameter(name = ApiConstants.GUEST_VM_CIDR, type = CommandType.STRING, description = "CIDR for guest VMs, CloudStack allocates IPs to guest VMs only from this CIDR") private String guestVmCidr; + @Parameter(name =ApiConstants.Update_IN_SEQUENCE, type=CommandType.BOOLEAN, description = "if true, we will update the routers one after the other. 
applicable only for redundant router based networks using virtual router as provider") + private Boolean updateInSequence; + @Parameter(name = ApiConstants.DISPLAY_NETWORK, type = CommandType.BOOLEAN, description = "an optional field, whether to the display the network to the end user or not.", authorized = {RoleType.Admin}) @@ -119,6 +122,13 @@ public Boolean getDisplayNetwork() { return displayNetwork; } + public Boolean getUpdateInSequence(){ + if(updateInSequence ==null) + return false; + else + return updateInSequence; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -149,7 +159,7 @@ public void execute() throws InsufficientCapacityException, ConcurrentOperationE Network result = _networkService.updateGuestNetwork(getId(), getNetworkName(), getDisplayText(), callerAccount, callerUser, getNetworkDomain(), getNetworkOfferingId(), - getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId()); + getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId(), getUpdateInSequence()); if (result != null) { NetworkResponse response = _responseGenerator.createNetworkResponse(ResponseView.Restricted, result); diff --git a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java index b71aa96d5ccc..1e2761f274a8 100644 --- a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java +++ b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java @@ -224,4 +224,10 @@ void implementNetworkElementsAndResources(DeployDestination dest, ReservationCon boolean resourceCountNeedsUpdate(NetworkOffering ntwkOff, ACLType aclType); void prepareAllNicsForMigration(VirtualMachineProfile vm, DeployDestination dest); + + boolean canUpdateInSequence(Network network); + + void configureUpdateInSequence(Network network); + + int getResourceCount(Network network); } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index 66185c60978d..c0ea2f9b635e 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -36,6 +36,12 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.network.Networks; + +import com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.network.element.RedundantResource; +import com.cloud.vm.dao.DomainRouterDao; +import org.apache.log4j.Logger; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.cloud.entity.api.db.VMNetworkMapVO; @@ -50,7 +56,6 @@ import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.region.PortableIpDao; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -107,7 +112,6 @@ import com.cloud.network.NetworkModel; import com.cloud.network.NetworkProfile; import com.cloud.network.NetworkStateListener; -import com.cloud.network.Networks; import 
com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetwork; @@ -265,9 +269,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra MessageBus _messageBus; @Inject VMNetworkMapDao _vmNetworkMapDao; + @Inject + DomainRouterDao _rotuerDao; List networkGurus; + public List getNetworkGurus() { return networkGurus; } @@ -350,6 +357,8 @@ public void setDhcpProviders(final List dhcpProviders) { PortableIpDao _portableIpDao; @Inject ConfigDepot _configDepot; + @Inject + NetworkDetailsDao _networkDetailsDao; protected StateMachine2 _stateMachine; ScheduledExecutorService _executor; @@ -1271,6 +1280,46 @@ protected boolean prepareElement(final NetworkElement element, final Network net return true; } + @Override + public boolean canUpdateInSequence(Network network){ + List providers = getNetworkProviders(network.getId()); + //check if the there are no service provider other than virtualrouter. + for(Provider provider :providers){ + if(provider!=Provider.VirtualRouter) + throw new UnsupportedOperationException("Cannot update the network resources in sequence when providers other than virtualrouter are used"); + } + return true; + } + + @Override + public void configureUpdateInSequence(Network network) { + List providers = getNetworkProviders(network.getId()); + for (NetworkElement element : networkElements) { + if (providers.contains(element.getProvider())) { + if (element instanceof RedundantResource) { + ((RedundantResource) element).configureResource(network); + } + } + } + } + + @Override + public int getResourceCount(Network network){ + List providers = getNetworkProviders(network.getId()); + int resourceCount=0; + for (NetworkElement element : networkElements) { + if (providers.contains(element.getProvider())) { + //currently only one element implements the redundant resource interface + if (element instanceof RedundantResource) { + resourceCount= ((RedundantResource) element).getResourceCount(network); + break; + } + } + } + return resourceCount; + } + + @DB protected void updateNic(final NicVO nic, final long networkId, final int count) { Transaction.execute(new TransactionCallbackNoReturn() { diff --git a/engine/schema/src/com/cloud/vm/DomainRouterVO.java b/engine/schema/src/com/cloud/vm/DomainRouterVO.java index 2596d24a5fb5..2a7aa49b6ed4 100644 --- a/engine/schema/src/com/cloud/vm/DomainRouterVO.java +++ b/engine/schema/src/com/cloud/vm/DomainRouterVO.java @@ -69,6 +69,11 @@ public class DomainRouterVO extends VMInstanceVO implements VirtualRouter { @Column(name = "vpc_id") private Long vpcId; + + @Column(name= "update_state") + @Enumerated(EnumType.STRING) + private UpdateState updateState; + public DomainRouterVO(final long id, final long serviceOfferingId, final long elementId, final String name, final long templateId, final HypervisorType hypervisorType, final long guestOSId, final long domainId, final long accountId, final long userId, final boolean isRedundantRouter, final RedundantState redundantState, final boolean haEnabled, final boolean stopPending, final Long vpcId) { @@ -193,4 +198,13 @@ public Long getVpcId() { return vpcId; } + public UpdateState getUpdateState() { + return updateState; + } + + public void setUpdateState(UpdateState updateState) { + this.updateState = updateState; + } + + } diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index b6dac872f30a..bb573decde28 100644 --- 
a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -108,6 +108,8 @@ import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.LoadBalancerVMMapDao; import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkDetailVO; +import com.cloud.network.dao.NetworkDetailsDao; import com.cloud.network.dao.NetworkDomainDao; import com.cloud.network.dao.NetworkDomainVO; import com.cloud.network.dao.NetworkServiceMapDao; @@ -178,6 +180,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExceptionUtil; import com.cloud.utils.net.NetUtils; +import com.cloud.vm.DomainRouterVO; import com.cloud.vm.Nic; import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.NicVO; @@ -187,6 +190,7 @@ import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.dao.NicSecondaryIpVO; @@ -324,6 +328,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Inject MessageBus _messageBus; + @Inject + DomainRouterDao _routerDao; + + @Inject + NetworkDetailsDao _networkDetailsDao; + int _cidrLimit; boolean _allowSubdomainNetworkAccess; @@ -1992,8 +2002,7 @@ private boolean checkForNonStoppedVmInNetwork(long networkId) { @DB @ActionEvent(eventType = EventTypes.EVENT_NETWORK_UPDATE, eventDescription = "updating network", async = true) public Network updateGuestNetwork(final long networkId, String name, String displayText, Account callerAccount, User callerUser, String domainSuffix, - final Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String customId) { - + final Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String customId, boolean updateInSequence) { boolean restartNetwork = false; // verify input parameters @@ -2239,130 +2248,153 @@ public Network updateGuestNetwork(final long networkId, String name, String disp ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); // 1) Shutdown all the elements and cleanup all the rules. 
Don't allow to shutdown network in intermediate // states - Shutdown and Implementing + List routers=null; + int resourceCount=1; + if(updateInSequence && restartNetwork && _networkOfferingDao.findById(network.getNetworkOfferingId()).getRedundantRouter() && networkOfferingId!=null && _networkOfferingDao.findById(networkOfferingId).getRedundantRouter() && network.getVpcId()==null) { + _networkMgr.canUpdateInSequence(network); + NetworkDetailVO networkDetail =new NetworkDetailVO(network.getId(),Network.updatingInSequence,"true",true); + _networkDetailsDao.persist(networkDetail); + _networkMgr.configureUpdateInSequence(network); + resourceCount=_networkMgr.getResourceCount(network); + } + boolean validStateToShutdown = (network.getState() == Network.State.Implemented || network.getState() == Network.State.Setup || network.getState() == Network.State.Allocated); - if (restartNetwork) { - if (validStateToShutdown) { - if (!changeCidr) { - s_logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); - - if (!_networkMgr.shutdownNetworkElementsAndResources(context, true, network)) { - s_logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); - CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network elements and resources as a part of update to network of specified id"); - ex.addProxyObject(network.getUuid(), "networkId"); - throw ex; - } - } else { - // We need to shutdown the network, since we want to re-implement the network. - s_logger.debug("Shutting down network id=" + networkId + " as a part of network update"); - - //check if network has reservation - if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { - s_logger.warn("Existing IP reservation will become ineffective for the network with id = " + networkId - + " You need to reapply reservation after network reimplementation."); - //set cidr to the newtork cidr - network.setCidr(network.getNetworkCidr()); - //set networkCidr to null to bring network back to no IP reservation state - network.setNetworkCidr(null); - } + try { - if (!_networkMgr.shutdownNetwork(network.getId(), context, true)) { - s_logger.warn("Failed to shutdown the network as a part of update to network with specified id"); - CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network as a part of update of specified network id"); + do { + if (restartNetwork) { + if (validStateToShutdown) { + if (!changeCidr) { + s_logger.debug("Shutting down elements and resources for network id=" + networkId + " as a part of network update"); + + if (!_networkMgr.shutdownNetworkElementsAndResources(context, true, network)) { + s_logger.warn("Failed to shutdown the network elements and resources as a part of network restart: " + network); + CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network elements and resources as a part of update to network of specified id"); + ex.addProxyObject(network.getUuid(), "networkId"); + throw ex; + } + } else { + // We need to shutdown the network, since we want to re-implement the network. 
+ s_logger.debug("Shutting down network id=" + networkId + " as a part of network update"); + + //check if network has reservation + if (NetUtils.isNetworkAWithinNetworkB(network.getCidr(), network.getNetworkCidr())) { + s_logger.warn("Existing IP reservation will become ineffective for the network with id = " + networkId + + " You need to reapply reservation after network reimplementation."); + //set cidr to the newtork cidr + network.setCidr(network.getNetworkCidr()); + //set networkCidr to null to bring network back to no IP reservation state + network.setNetworkCidr(null); + } + + if (!_networkMgr.shutdownNetwork(network.getId(), context, true)) { + s_logger.warn("Failed to shutdown the network as a part of update to network with specified id"); + CloudRuntimeException ex = new CloudRuntimeException("Failed to shutdown the network as a part of update of specified network id"); + ex.addProxyObject(network.getUuid(), "networkId"); + throw ex; + } + } + } else { + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to shutdown the network elements and resources as a part of update to network with specified id; network is in wrong state: " + network.getState()); ex.addProxyObject(network.getUuid(), "networkId"); throw ex; } } - } else { - CloudRuntimeException ex = new CloudRuntimeException( - "Failed to shutdown the network elements and resources as a part of update to network with specified id; network is in wrong state: " + network.getState()); - ex.addProxyObject(network.getUuid(), "networkId"); - throw ex; - } - } - // 2) Only after all the elements and rules are shutdown properly, update the network VO - // get updated network - Network.State networkState = _networksDao.findById(networkId).getState(); - boolean validStateToImplement = (networkState == Network.State.Implemented || networkState == Network.State.Setup || networkState == Network.State.Allocated); - if (restartNetwork && !validStateToImplement) { - CloudRuntimeException ex = new CloudRuntimeException( - "Failed to implement the network elements and resources as a part of update to network with specified id; network is in wrong state: " + networkState); - ex.addProxyObject(network.getUuid(), "networkId"); - throw ex; - } + // 2) Only after all the elements and rules are shutdown properly, update the network VO + // get updated network + Network.State networkState = _networksDao.findById(networkId).getState(); + boolean validStateToImplement = (networkState == Network.State.Implemented || networkState == Network.State.Setup || networkState == Network.State.Allocated); + if (restartNetwork && !validStateToImplement) { + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to implement the network elements and resources as a part of update to network with specified id; network is in wrong state: " + networkState); + ex.addProxyObject(network.getUuid(), "networkId"); + throw ex; + } - if (networkOfferingId != null) { - if (networkOfferingChanged) { - Transaction.execute(new TransactionCallbackNoReturn() { - @Override - public void doInTransactionWithoutResult(TransactionStatus status) { - network.setNetworkOfferingId(networkOfferingId); - _networksDao.update(networkId, network, newSvcProviders); - // get all nics using this network - // log remove usage events for old offering - // log assign usage events for new offering - List nics = _nicDao.listByNetworkId(networkId); - for (NicVO nic : nics) { - long vmId = nic.getInstanceId(); - VMInstanceVO vm = _vmDao.findById(vmId); - if (vm == null) { - 
s_logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); - continue; + if (networkOfferingId != null) { + if (networkOfferingChanged) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + network.setNetworkOfferingId(networkOfferingId); + _networksDao.update(networkId, network, newSvcProviders); + // get all nics using this network + // log remove usage events for old offering + // log assign usage events for new offering + List nics = _nicDao.listByNetworkId(networkId); + for (NicVO nic : nics) { + long vmId = nic.getInstanceId(); + VMInstanceVO vm = _vmDao.findById(vmId); + if (vm == null) { + s_logger.error("Vm for nic " + nic.getId() + " not found with Vm Id:" + vmId); + continue; + } + long isDefault = (nic.isDefaultNic()) ? 1 : 0; + String nicIdString = Long.toString(nic.getId()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), nicIdString, + oldNetworkOfferingId, null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), nicIdString, + networkOfferingId, null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay()); + } + } + }); + } else { + network.setNetworkOfferingId(networkOfferingId); + _networksDao.update(networkId, network, + _networkMgr.finalizeServicesAndProvidersForNetwork(_entityMgr.findById(NetworkOffering.class, networkOfferingId), network.getPhysicalNetworkId())); } - long isDefault = (nic.isDefaultNic()) ? 1 : 0; - String nicIdString = Long.toString(nic.getId()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), nicIdString, - oldNetworkOfferingId, null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), nicIdString, - networkOfferingId, null, isDefault, VirtualMachine.class.getName(), vm.getUuid(), vm.isDisplay()); + } else { + _networksDao.update(networkId, network); } + + // 3) Implement the elements and rules again + if (restartNetwork) { + if (network.getState() != Network.State.Allocated) { + DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); + s_logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); + try { + if (!changeCidr) { + _networkMgr.implementNetworkElementsAndResources(dest, context, network, _networkOfferingDao.findById(network.getNetworkOfferingId())); + } else { + _networkMgr.implementNetwork(network.getId(), dest, context); + } + } catch (Exception ex) { + s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); + CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id) elements and resources as a part of network update"); + e.addProxyObject(network.getUuid(), "networkId"); + throw e; + } } - }); - } else { - network.setNetworkOfferingId(networkOfferingId); - _networksDao.update(networkId, network, - _networkMgr.finalizeServicesAndProvidersForNetwork(_entityMgr.findById(NetworkOffering.class, networkOfferingId), 
network.getPhysicalNetworkId())); - } - } else { - _networksDao.update(networkId, network); - } - - // 3) Implement the elements and rules again - if (restartNetwork) { - if (network.getState() != Network.State.Allocated) { - DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); - s_logger.debug("Implementing the network " + network + " elements and resources as a part of network update"); - try { - if (!changeCidr) { - _networkMgr.implementNetworkElementsAndResources(dest, context, network, _networkOfferingDao.findById(network.getNetworkOfferingId())); - } else { - _networkMgr.implementNetwork(network.getId(), dest, context); - } - } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part of network update due to ", ex); - CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified id) elements and resources as a part of network update"); - e.addProxyObject(network.getUuid(), "networkId"); - throw e; } - } - } - // 4) if network has been upgraded from a non persistent ntwk offering to a persistent ntwk offering, - // implement the network if its not already - if (networkOfferingChanged && !oldNtwkOff.getIsPersistent() && networkOffering.getIsPersistent()) { - if (network.getState() == Network.State.Allocated) { - try { - DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); - _networkMgr.implementNetwork(network.getId(), dest, context); - } catch (Exception ex) { - s_logger.warn("Failed to implement network " + network + " elements and resources as a part o" + "f network update due to ", ex); - CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified" + " id) elements and resources as a part of network update"); - e.addProxyObject(network.getUuid(), "networkId"); - throw e; + // 4) if network has been upgraded from a non persistent ntwk offering to a persistent ntwk offering, + // implement the network if its not already + if (networkOfferingChanged && !oldNtwkOff.getIsPersistent() && networkOffering.getIsPersistent()) { + if (network.getState() == Network.State.Allocated) { + try { + DeployDestination dest = new DeployDestination(_dcDao.findById(network.getDataCenterId()), null, null, null); + _networkMgr.implementNetwork(network.getId(), dest, context); + } catch (Exception ex) { + s_logger.warn("Failed to implement network " + network + " elements and resources as a part o" + "f network update due to ", ex); + CloudRuntimeException e = new CloudRuntimeException("Failed to implement network (with specified" + " id) elements and resources as a part of network update"); + e.addProxyObject(network.getUuid(), "networkId"); + throw e; + } + } + } + resourceCount--; + } while(updateInSequence && resourceCount>0); + }catch (Exception exception){ + throw new CloudRuntimeException("failed to update network "+network.getUuid()+"due to "+exception.getMessage()); + }finally { + if(updateInSequence){ + if( _networkDetailsDao.findDetail(networkId,Network.updatingInSequence)!=null){ + _networkDetailsDao.removeDetail(networkId,Network.updatingInSequence); } } } - return getNetwork(network.getId()); } diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index d802188e4c4b..03d19588af3c 100644 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ 
b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -24,6 +24,9 @@ import javax.inject.Inject; +import com.cloud.network.dao.NetworkDetailVO; +import com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.DataCenter; @@ -109,7 +112,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterElementService, DhcpServiceProvider, UserDataServiceProvider, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider, LoadBalancingServiceProvider, PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, -NetworkMigrationResponder, AggregatedCommandExecutor { +NetworkMigrationResponder, AggregatedCommandExecutor, RedundantResource { private static final Logger s_logger = Logger.getLogger(VirtualRouterElement.class); public static final AutoScaleCounterType AutoScaleCounterCpu = new AutoScaleCounterType("cpu"); public static final AutoScaleCounterType AutoScaleCounterMemory = new AutoScaleCounterType("memory"); @@ -159,6 +162,9 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl @Inject NetworkTopologyContext networkTopologyContext; + @Inject + NetworkDetailsDao _networkDetailsDao; + @Inject protected RouterDeploymentDefinitionBuilder routerDeploymentDefinitionBuilder; @@ -262,7 +268,7 @@ public boolean prepare(final Network network, final NicProfile nic, final Virtua public boolean applyFWRules(final Network network, final List rules) throws ResourceUnavailableException { boolean result = true; if (canHandle(network, Service.Firewall)) { - final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); + final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { s_logger.debug("Virtual router elemnt doesn't need to apply firewall rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -407,7 +413,7 @@ public boolean applyLBRules(final Network network, final List return false; } - final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); + final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { s_logger.debug("Virtual router elemnt doesn't need to apply lb rules on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -498,7 +504,7 @@ public boolean applyIps(final Network network, final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); + final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { s_logger.debug("Virtual router elemnt doesn't need to associate ip addresses on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -657,7 +663,7 @@ private static Map> setCapabilities() { public boolean applyStaticNats(final Network network, final List rules) throws ResourceUnavailableException { boolean result = true; if (canHandle(network, Service.StaticNat)) { - final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); + final List routers = getRouters(network); if (routers == null || routers.isEmpty()) { s_logger.debug("Virtual router elemnt doesn't need to apply static nat on the backend; virtual " + "router doesn't exist in the network " + network.getId()); return true; @@ -673,6 
+679,46 @@ public boolean applyStaticNats(final Network network, final List getRouters(Network network){ + List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); + if (routers !=null && routers.isEmpty()) { + return null; + } + NetworkDetailVO updateInSequence=_networkDetailsDao.findDetail(network.getId(), Network.updatingInSequence); + if(network.isRedundant() && updateInSequence!=null && "true".equalsIgnoreCase(updateInSequence.getValue())){ + List masterRouters=new ArrayList(); + int noOfrouters=routers.size(); + while (noOfrouters>0){ + DomainRouterVO router = routers.get(0); + if(router.getUpdateState()== VirtualRouter.UpdateState.UPDATE_IN_PROGRESS){ + ArrayList routerList = new ArrayList(); + routerList.add(router); + return routerList; + } + if(router.getUpdateState()== VirtualRouter.UpdateState.UPDATE_COMPLETE) { + routers.remove(router); + noOfrouters--; + continue; + } + if(router.getRedundantState()!=VirtualRouter.RedundantState.BACKUP) { + masterRouters.add(router); + routers.remove(router); + } + noOfrouters--; + } + if(routers.size()==0 && masterRouters.size()==0){ + return null; + } + if(routers.size()==0 && masterRouters.size()!=0){ + routers=masterRouters; + } + routers=routers.subList(0,1); + routers.get(0).setUpdateState(VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); + _routerDao.persist(routers.get(0)); + } + return routers; + } + @Override public boolean shutdown(final Network network, final ReservationContext context, final boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { final List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); @@ -1031,7 +1077,7 @@ protected List getRouters(final Network network, final DeployDes List routers; if (publicNetwork) { - routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); + routers = getRouters(network); } else { if (isPodBased && dest.getPod() != null) { final Long podId = dest.getPod().getId(); @@ -1228,7 +1274,27 @@ public boolean completeAggregatedExecution(final Network network, final DeployDe throw new ResourceUnavailableException("Can't find at least one router!", DataCenter.class, network.getDataCenterId()); } - return _routerMgr.completeAggregatedExecution(network, routers); + NetworkDetailVO networkDetail=_networkDetailsDao.findDetail(network.getId(), Network.updatingInSequence); + boolean updateInSequence= "true".equalsIgnoreCase((networkDetail!=null ? networkDetail.getValue() : null)); + if(updateInSequence){ + DomainRouterVO router=routers.get(0); + router.setUpdateState(VirtualRouter.UpdateState.UPDATE_COMPLETE); + _routerDao.persist(router); + } + boolean result=false; + try{ + result=_routerMgr.completeAggregatedExecution(network, routers); + } finally { + if(!result && updateInSequence) { + //fail the network update. even if one router fails we fail the network update. 
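                // EDITOR'S NOTE (a sketch inferred from this patch series, not an authoritative reading):
                // the router update lifecycle is UPDATE_NEEDED -> UPDATE_IN_PROGRESS -> UPDATE_COMPLETE;
                // the failure path below stamps every router UPDATE_FAILED so the whole network update
                // is treated as failed, and startRouter() clears UPDATE_FAILED before the next attempt.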
+ List routerList = _routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER); + for (DomainRouterVO router : routerList) { + router.setUpdateState(VirtualRouter.UpdateState.UPDATE_FAILED); + _routerDao.persist(router); + } + } + } + return result; } @Override @@ -1237,4 +1303,22 @@ public boolean cleanupAggregatedExecution(final Network network, final DeployDes // lets not waste another command return true; } + + @Override + public void configureResource(Network network) { + NetworkDetailVO networkDetail=_networkDetailsDao.findDetail(network.getId(), Network.updatingInSequence); + if(networkDetail==null || !"true".equalsIgnoreCase(networkDetail.getValue())) + throw new CloudRuntimeException("failed to configure the resource, network update is not in progress."); + Listrouters = _routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER); + for(DomainRouterVO router : routers){ + router.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); + _routerDao.persist(router); + } + } + + @Override + public int getResourceCount(Network network) { + return _routerDao.listByNetworkAndRole(network.getId(), VirtualRouter.Role.VIRTUAL_ROUTER).size(); + } + } diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 3332393f6521..b6c8cf091ae6 100644 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -2215,6 +2215,11 @@ public VirtualRouter startRouter(final long routerId, final boolean reprogramNet // verify parameters DomainRouterVO router = _routerDao.findById(routerId); + //clean up the update_state feild + if(router.getUpdateState()== VirtualRouter.UpdateState.UPDATE_FAILED){ + router.setUpdateState(null); + _routerDao.update(router.getId(),router); + } if (router == null) { throw new InvalidParameterValueException("Unable to find router by id " + routerId + "."); } diff --git a/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinition.java b/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinition.java index 19f80b94a438..9b22562a74e9 100644 --- a/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinition.java +++ b/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinition.java @@ -20,6 +20,9 @@ import java.util.List; import java.util.Map; +import com.cloud.network.dao.NetworkDetailVO; +import com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.network.router.VirtualRouter; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.log4j.Logger; @@ -106,6 +109,7 @@ public class RouterDeploymentDefinition { protected Long tableLockId; protected boolean isPublicNetwork; protected PublicIp sourceNatIp; + protected NetworkDetailsDao networkDetailsDao; protected RouterDeploymentDefinition(final Network guestNetwork, final DeployDestination dest, final Account owner, final Map params) { @@ -410,7 +414,13 @@ protected void deployAllVirtualRouters() throws ConcurrentOperationException, In // Don't start the router as we are holding the network lock that // needs to be released at the end of router allocation final DomainRouterVO router = nwHelper.deployRouter(this, false); - + //check if the network update is in progress. + //if update is in progress add the update_pending flag to DomainRouterVO. 
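        // EDITOR'S NOTE (inferred, not part of the patch): marking a freshly deployed router
        // UPDATE_IN_PROGRESS lets VirtualRouterElement.getRouters() keep selecting it until
        // completeAggregatedExecution() marks it UPDATE_COMPLETE, which is what serializes
        // router replacements during an in-sequence network update.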
+ NetworkDetailVO detail =networkDetailsDao.findDetail(guestNetwork.getId(),Network.updatingInSequence); + if("true".equalsIgnoreCase(detail!=null ? detail.getValue() : null)) { + router.setUpdateState(VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); + routerDao.persist(router); + } if (router != null) { routerDao.addRouterToGuestNetwork(router, guestNetwork); //Fix according to changes by Sheng Yang in commit ID cb4513379996b262ae378daf00c6388c6b7313cf diff --git a/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinitionBuilder.java b/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinitionBuilder.java index 3ba4fad77de8..3765537a1481 100644 --- a/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinitionBuilder.java +++ b/server/src/org/cloud/network/router/deployment/RouterDeploymentDefinitionBuilder.java @@ -22,6 +22,7 @@ import javax.inject.Inject; +import com.cloud.network.dao.NetworkDetailsDao; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; @@ -96,6 +97,8 @@ public class RouterDeploymentDefinitionBuilder { private VpcManager vpcMgr; @Inject private VlanDao vlanDao; + @Inject + private NetworkDetailsDao networkDetailsDao; @Autowired @Qualifier("networkHelper") @@ -133,6 +136,7 @@ protected RouterDeploymentDefinition injectDependencies( routerDeploymentDefinition.ipv6Dao = ipv6Dao; routerDeploymentDefinition.ipAddressDao = ipAddressDao; routerDeploymentDefinition.serviceOfferingId = offeringId; + routerDeploymentDefinition.networkDetailsDao = networkDetailsDao; routerDeploymentDefinition.nwHelper = nwHelper; diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index a5d8a1ab584a..3e80865572b6 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -247,7 +247,7 @@ public IpAddress getIp(long id) { */ @Override public Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, User callerUser, String domainSuffix, - Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID) { + Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID,boolean updateInSequence) { // TODO Auto-generated method stub return null; } @@ -841,6 +841,21 @@ public void prepareAllNicsForMigration(VirtualMachineProfile vm, DeployDestinati return; } + @Override + public boolean canUpdateInSequence(Network network) { + return false; + } + + @Override + public void configureUpdateInSequence(Network network) { + return; + } + + @Override + public int getResourceCount(Network network) { + return 0; + } + @Override public void prepareNicForMigration(VirtualMachineProfile vm, DeployDestination dest) { // TODO Auto-generated method stub diff --git a/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTest.java b/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTest.java index eff16c16b8d0..6195387dbe22 100644 --- a/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTest.java +++ b/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTest.java @@ -714,7 +714,7 @@ public void testDeployAllVirtualRouters() final DomainRouterVO routerVO2 = 
mock(DomainRouterVO.class); when(mockNetworkHelper.deployRouter(deploymentUT, false)) .thenReturn(routerVO1).thenReturn(routerVO2); - + when(networkDetailsDao.findById(anyLong())).thenReturn(null); // Execute deploymentUT.deployAllVirtualRouters(); diff --git a/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTestBase.java b/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTestBase.java index 4225083ca2bd..626c2d7acc67 100644 --- a/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTestBase.java +++ b/server/test/org/cloud/network/router/deployment/RouterDeploymentDefinitionTestBase.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Map; +import com.cloud.network.dao.NetworkDetailsDao; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; @@ -79,6 +80,8 @@ public class RouterDeploymentDefinitionTestBase { @Mock protected NetworkHelper mockNetworkHelper; @Mock + protected NetworkDetailsDao networkDetailsDao; + @Mock protected VpcNetworkHelperImpl vpcNwHelper; @Mock protected VMInstanceDao mockVmDao; diff --git a/setup/db/db/schema-452to460.sql b/setup/db/db/schema-452to460.sql index dfb629f95a0f..e05ad6d255db 100644 --- a/setup/db/db/schema-452to460.sql +++ b/setup/db/db/schema-452to460.sql @@ -420,3 +420,5 @@ INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervis INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'CentOS 7', 246, utc_timestamp(), 0); UPDATE `cloud`.`hypervisor_capabilities` SET `max_data_volumes_limit` = '32' WHERE `hypervisor_capabilities`.`hypervisor_type` = 'KVM'; +ALTER TABLE `cloud`.`domain_router` ADD COLUMN update_state varchar(64) DEFAULT NULL; + From f416332994f28c2158b17ee80e2e03f5a69c22c2 Mon Sep 17 00:00:00 2001 From: Bharat Kumar Date: Thu, 20 Aug 2015 17:21:20 +0530 Subject: [PATCH 023/687] CLOUDSTACK-8751 Added tests --- .../element/VirtualRouterElementTest.java | 151 +++++++++++++++++- 1 file changed, 150 insertions(+), 1 deletion(-) diff --git a/server/test/com/cloud/network/element/VirtualRouterElementTest.java b/server/test/com/cloud/network/element/VirtualRouterElementTest.java index 659277824ea3..4fbc28ee4cb8 100644 --- a/server/test/com/cloud/network/element/VirtualRouterElementTest.java +++ b/server/test/com/cloud/network/element/VirtualRouterElementTest.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyList; import static org.mockito.Matchers.anyLong; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.when; @@ -26,6 +27,11 @@ import java.util.ArrayList; import java.util.List; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.network.dao.NetworkDetailVO; +import com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.network.router.VirtualRouter; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.cloud.network.router.deployment.RouterDeploymentDefinitionBuilder; @@ -35,6 +41,8 @@ import org.mockito.InjectMocks; import org.mockito.Matchers; import org.mockito.Mock; +import org.mockito.Mockito; +import 
org.mockito.invocation.InvocationOnMock; import org.mockito.runners.MockitoJUnitRunner; import com.cloud.cluster.dao.ManagementServerHostDao; @@ -111,6 +119,7 @@ import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; +import org.mockito.stubbing.Answer; @RunWith(MockitoJUnitRunner.class) public class VirtualRouterElementTest { @@ -127,6 +136,7 @@ public class VirtualRouterElementTest { @Mock private ManagementServerHostDao _msHostDao; @Mock private NetworkDao _networkDao; @Mock private NetworkOfferingDao _networkOfferingDao; + @Mock private NetworkDetailsDao _networkDetailsDao; @Mock private NicDao _nicDao; @Mock private NicIpAliasDao _nicIpAliasDao; @Mock private OpRouterMonitorServiceDao _opRouterMonitorServiceDao; @@ -225,6 +235,62 @@ public void testPrepare() { } + @Test + public void testGetRouters1(){ + Network networkUpdateInprogress=new NetworkVO(1l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); + mockDAOs((NetworkVO)networkUpdateInprogress,testOffering); + //getRoutes should always return the router that is updating. + List routers=virtualRouterElement.getRouters(networkUpdateInprogress); + assertTrue(routers.size()==1); + assertTrue(routers.get(0).getUpdateState()== VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); + } + + @Test + public void testGetRouters2(){ + Network networkUpdateInprogress=new NetworkVO(2l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); + mockDAOs((NetworkVO)networkUpdateInprogress,testOffering); + //alwyas return backup routers first when both master and backup need update. + List routers=virtualRouterElement.getRouters(networkUpdateInprogress); + assertTrue(routers.size()==1); + assertTrue(routers.get(0).getRedundantState()==RedundantState.BACKUP && routers.get(0).getUpdateState()==VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); + } + + @Test + public void testGetRouters3(){ + Network network=new NetworkVO(3l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); + mockDAOs((NetworkVO)network,testOffering); + //alwyas return backup routers first when both master and backup need update. 
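        // EDITOR'S NOTE: unlike networks 1 and 2, no updatingInSequence detail is mocked for
        // network 3, so getRouters() is expected to fall through and return all four mocked routers.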
+ List routers=virtualRouterElement.getRouters(network); + assertTrue(routers.size()==4); + } + + @Test + public void getResourceCountTest(){ + Network network=new NetworkVO(3l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); + mockDAOs((NetworkVO)network,testOffering); + int routers=virtualRouterElement.getResourceCount(network); + assertTrue(routers==4); + } + + @Test + public void completeAggregationCommandTest1() throws AgentUnavailableException,ResourceUnavailableException { + virtualRouterElement._routerMgr = Mockito.mock(VpcVirtualNetworkApplianceManagerImpl.class); + virtualRouterElement.routerDeploymentDefinitionBuilder = routerDeploymentDefinitionBuilder; + Network network = new NetworkVO(6l, null, null, null, 1l, 1l, 1l, 1l, "d", "d", "d", null, 1l, 1l, null, true, null, true); + when(virtualRouterElement._routerMgr.completeAggregatedExecution(any(Network.class), anyList())).thenReturn(true); + mockDAOs((NetworkVO) network, testOffering); + when(virtualRouterElement._routerDao.persist(any(DomainRouterVO.class))).thenAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocationOnMock) throws Throwable { + Object args[] = invocationOnMock.getArguments(); + DomainRouterVO router = (DomainRouterVO) args[0]; + if (router.getUpdateState() != VirtualRouter.UpdateState.UPDATE_COMPLETE) { + throw new CloudRuntimeException("TestFailed: completeAggregationCommandTest1 failed"); + } else return null; + } + }); + virtualRouterElement.completeAggregatedExecution(network, testDestination); + } /** * @param networks * @param offerings @@ -293,11 +359,94 @@ private void mockDAOs(final NetworkVO network, final NetworkOfferingVO offering) /* haEnabled */ false, /* stopPending */ false, /* vpcId */ null); - + final DomainRouterVO routerNeedUpdateBackup = new DomainRouterVO(/* id */ 2L, + /* serviceOfferingId */ 1L, + /* elementId */ 0L, + "name", + /* templateId */0L, + HypervisorType.XenServer, + /* guestOSId */ 0L, + /* domainId */ 0L, + /* accountId */ 1L, + /* userId */ 1L, + /* isRedundantRouter */ false, + RedundantState.BACKUP, + /* haEnabled */ false, + /* stopPending */ false, + /* vpcId */ null); + routerNeedUpdateBackup.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); + final DomainRouterVO routerNeedUpdateMaster = new DomainRouterVO(/* id */ 3L, + /* serviceOfferingId */ 1L, + /* elementId */ 0L, + "name", + /* templateId */0L, + HypervisorType.XenServer, + /* guestOSId */ 0L, + /* domainId */ 0L, + /* accountId */ 1L, + /* userId */ 1L, + /* isRedundantRouter */ false, + RedundantState.MASTER, + /* haEnabled */ false, + /* stopPending */ false, + /* vpcId */ null); + routerNeedUpdateMaster.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); + final DomainRouterVO routerUpdateComplete = new DomainRouterVO(/* id */ 4L, + /* serviceOfferingId */ 1L, + /* elementId */ 0L, + "name", + /* templateId */0L, + HypervisorType.XenServer, + /* guestOSId */ 0L, + /* domainId */ 0L, + /* accountId */ 1L, + /* userId */ 1L, + /* isRedundantRouter */ false, + RedundantState.UNKNOWN, + /* haEnabled */ false, + /* stopPending */ false, + /* vpcId */ null); + routerUpdateComplete.setUpdateState(VirtualRouter.UpdateState.UPDATE_COMPLETE); + final DomainRouterVO routerUpdateInProgress = new DomainRouterVO(/* id */ 5L, + /* serviceOfferingId */ 1L, + /* elementId */ 0L, + "name", + /* templateId */0L, + HypervisorType.XenServer, + /* guestOSId */ 0L, + /* domainId */ 0L, + /* accountId */ 1L, + /* userId */ 1L, + /* isRedundantRouter */ false, + 
RedundantState.UNKNOWN,
+                /* haEnabled */ false,
+                /* stopPending */ false,
+                /* vpcId */ null);
+        routerUpdateInProgress.setUpdateState(VirtualRouter.UpdateState.UPDATE_IN_PROGRESS);
+        List<DomainRouterVO> routerList1 = new ArrayList<>();
+        routerList1.add(routerUpdateComplete);
+        routerList1.add(routerNeedUpdateBackup);
+        routerList1.add(routerNeedUpdateMaster);
+        routerList1.add(routerUpdateInProgress);
+        List<DomainRouterVO> routerList2 = new ArrayList<>();
+        routerList2.add(routerUpdateComplete);
+        routerList2.add(routerNeedUpdateBackup);
+        routerList2.add(routerNeedUpdateMaster);
+        List<DomainRouterVO> routerList3 = new ArrayList<>();
+        routerList3.add(routerUpdateComplete);
+        routerList3.add(routerUpdateInProgress);
         when(_routerDao.getNextInSequence(Long.class, "id")).thenReturn(1L);
         when(_templateDao.findRoutingTemplate(HypervisorType.XenServer, "SystemVM Template (XenServer)")).thenReturn(new VMTemplateVO());
         when(_routerDao.persist(any(DomainRouterVO.class))).thenReturn(router);
         when(_routerDao.findById(router.getId())).thenReturn(router);
+        when(_routerDao.listByNetworkAndRole(1l, VirtualRouter.Role.VIRTUAL_ROUTER)).thenReturn(routerList1);
+        when(_routerDao.listByNetworkAndRole(2l, VirtualRouter.Role.VIRTUAL_ROUTER)).thenReturn(routerList2);
+        when(_routerDao.listByNetworkAndRole(3l, VirtualRouter.Role.VIRTUAL_ROUTER)).thenReturn(routerList1);
+        when(_routerDao.listByNetworkAndRole(6l, VirtualRouter.Role.VIRTUAL_ROUTER)).thenReturn(routerList3);
+        when(_networkDetailsDao.findDetail(1l, Network.updatingInSequence)).thenReturn(new NetworkDetailVO(1l,Network.updatingInSequence,"true",true));
+        when(_networkDetailsDao.findDetail(2l, Network.updatingInSequence)).thenReturn(new NetworkDetailVO(2l,Network.updatingInSequence,"true",true));
+        when(_networkDetailsDao.findDetail(6l, Network.updatingInSequence)).thenReturn(new NetworkDetailVO(2l,Network.updatingInSequence,"true",true));
+        when(_routerDao.persist(any(DomainRouterVO.class))).thenReturn(router);
     }
 }

From ba9dcba16df604d8d4b84084bc24c04cc27fb9ac Mon Sep 17 00:00:00 2001
From: Bharat Kumar
Date: Thu, 26 Nov 2015 15:30:06 +0530
Subject: [PATCH 024/687] Do not update the network if one of the routers is in
 an unknown state

Added checks to prevent a network update when a router's state is unknown
or when the new offering removes a service that is in use.

Added a new param, forced, to the updateNetwork API. The network will
undergo a forced update when this param is set to true.

CLOUDSTACK-8751 Clean up network config, such as firewall rules, when
network services are removed during a network update.
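As a rough usage sketch (an editorial addition, not part of this commit; it mirrors the
call made by the test added in the next patch, with apiclient, network and new_offering
assumed to already exist):

    # Hedged sketch: exercising the new updateNetwork flags from a Marvin test.
    # 'apiclient', 'network' and 'new_offering' are assumed fixtures, not defined here.
    network.update(
        apiclient,
        networkofferingid=new_offering.id,
        updateinsequence=True,  # cycle redundant routers one at a time
        forced=True,            # proceed even if a router is in UNKNOWN state
        changecidr=False
    )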
--- api/src/com/cloud/network/NetworkService.java | 2 +- .../network/vpn/RemoteAccessVpnService.java | 2 +- .../network/UpdateNetworkCmdByAdmin.java | 2 +- .../user/network/UpdateNetworkCmd.java | 11 +- .../user/vpn/DeleteRemoteAccessVpnCmd.java | 2 +- .../service/NetworkOrchestrationService.java | 4 + .../orchestration/NetworkOrchestrator.java | 113 ++++++++++++++++++ .../cloud/network/IpAddressManagerImpl.java | 2 +- .../com/cloud/network/NetworkServiceImpl.java | 32 ++++- ...VpcVirtualNetworkApplianceManagerImpl.java | 10 ++ .../vpn/RemoteAccessVpnManagerImpl.java | 6 +- .../com/cloud/user/AccountManagerImpl.java | 2 +- .../com/cloud/vpc/MockNetworkManagerImpl.java | 12 +- 13 files changed, 186 insertions(+), 14 deletions(-) diff --git a/api/src/com/cloud/network/NetworkService.java b/api/src/com/cloud/network/NetworkService.java index e26db340ff4f..7a8a94987807 100644 --- a/api/src/com/cloud/network/NetworkService.java +++ b/api/src/com/cloud/network/NetworkService.java @@ -77,7 +77,7 @@ IpAddress allocatePortableIP(Account ipOwner, int regionId, Long zoneId, Long ne IpAddress getIp(long id); Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, User callerUser, String domainSuffix, Long networkOfferingId, - Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID, boolean updateInSequence); + Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID, boolean updateInSequence, boolean forced); PhysicalNetwork createPhysicalNetwork(Long zoneId, String vnetRange, String networkSpeed, List isolationMethods, String broadcastDomainRange, Long domainId, List tags, String name); diff --git a/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java index decf8c437330..d089b8524497 100644 --- a/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java +++ b/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java @@ -33,7 +33,7 @@ public interface RemoteAccessVpnService { RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall, Boolean forDisplay) throws NetworkRuleConflictException; - boolean destroyRemoteAccessVpnForIp(long ipId, Account caller) throws ResourceUnavailableException; + boolean destroyRemoteAccessVpnForIp(long ipId, Account caller, boolean forceCleanup) throws ResourceUnavailableException; RemoteAccessVpn startRemoteAccessVpn(long vpnServerAddressId, boolean openFirewall) throws ResourceUnavailableException; diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java b/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java index f2c5119466cc..388348c592c1 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkCmdByAdmin.java @@ -49,7 +49,7 @@ public void execute() throws InsufficientCapacityException, ConcurrentOperationE } Network result = _networkService.updateGuestNetwork(getId(), getNetworkName(), getDisplayText(), callerAccount, - callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId(), getUpdateInSequence()); + callerUser, getNetworkDomain(), getNetworkOfferingId(), getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId(), getUpdateInSequence(),getForced()); if (result != null) { diff --git 
a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java index 8ef9251e0475..c313f369b0e2 100644 --- a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java @@ -83,6 +83,9 @@ public class UpdateNetworkCmd extends BaseAsyncCustomIdCmd { description = "an optional field, whether to the display the network to the end user or not.", authorized = {RoleType.Admin}) private Boolean displayNetwork; + @Parameter(name= ApiConstants.FORCED, type = CommandType.BOOLEAN, description = "Setting this to true will cause a forced network update,", authorized = {RoleType.Admin}) + private Boolean forced; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -129,6 +132,12 @@ public Boolean getUpdateInSequence(){ return updateInSequence; } + public boolean getForced(){ + if(forced==null){ + return false; + } + return forced; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -159,7 +168,7 @@ public void execute() throws InsufficientCapacityException, ConcurrentOperationE Network result = _networkService.updateGuestNetwork(getId(), getNetworkName(), getDisplayText(), callerAccount, callerUser, getNetworkDomain(), getNetworkOfferingId(), - getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId(), getUpdateInSequence()); + getChangeCidr(), getGuestVmCidr(), getDisplayNetwork(), getCustomId(), getUpdateInSequence(), getForced()); if (result != null) { NetworkResponse response = _responseGenerator.createNetworkResponse(ResponseView.Restricted, result); diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java index 37b7b5aaa3e7..12ab531375f1 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/DeleteRemoteAccessVpnCmd.java @@ -93,7 +93,7 @@ public String getEventType() { @Override public void execute() throws ResourceUnavailableException { - if (! _ravService.destroyRemoteAccessVpnForIp(publicIpId, CallContext.current().getCallingAccount())) { + if (! 
_ravService.destroyRemoteAccessVpnForIp(publicIpId, CallContext.current().getCallingAccount(), false)) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete remote access vpn"); } } diff --git a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java index 1e2761f274a8..89bec1783f61 100644 --- a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java +++ b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/NetworkOrchestrationService.java @@ -227,6 +227,10 @@ void implementNetworkElementsAndResources(DeployDestination dest, ReservationCon boolean canUpdateInSequence(Network network); + List getServicesNotSupportedInNewOffering(Network network, long newNetworkOfferingId); + + void cleanupConfigForServicesInNetwork(List services, Network network); + void configureUpdateInSequence(Network network); int getResourceCount(Network network); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index c0ea2f9b635e..5a89dac7810e 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -39,6 +39,9 @@ import com.cloud.network.Networks; import com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.network.dao.RemoteAccessVpnDao; +import com.cloud.network.dao.RemoteAccessVpnVO; +import com.cloud.network.dao.VpnUserDao; import com.cloud.network.element.RedundantResource; import com.cloud.vm.dao.DomainRouterDao; import org.apache.log4j.Logger; @@ -271,6 +274,10 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra VMNetworkMapDao _vmNetworkMapDao; @Inject DomainRouterDao _rotuerDao; + @Inject + RemoteAccessVpnDao _remoteAccessVpnDao; + @Inject + VpnUserDao _vpnUserDao; List networkGurus; @@ -1283,6 +1290,7 @@ protected boolean prepareElement(final NetworkElement element, final Network net @Override public boolean canUpdateInSequence(Network network){ List providers = getNetworkProviders(network.getId()); + //check if the there are no service provider other than virtualrouter. for(Provider provider :providers){ if(provider!=Provider.VirtualRouter) @@ -1291,6 +1299,111 @@ public boolean canUpdateInSequence(Network network){ return true; } + @Override + public List getServicesNotSupportedInNewOffering(Network network,long newNetworkOfferingId){ + NetworkOffering offering =_networkOfferingDao.findById(newNetworkOfferingId); + List services=_ntwkOfferingSrvcDao.listServicesForNetworkOffering(offering.getId()); + List serviceMap= _ntwkSrvcDao.getServicesInNetwork(network.getId()); + List servicesNotInNewOffering=new ArrayList<>(); + for(NetworkServiceMapVO serviceVO :serviceMap){ + boolean inlist=false; + for(String service: services){ + if(serviceVO.getService().equalsIgnoreCase(service)){ + inlist=true; + break; + } + } + if(!inlist){ + //ignore Gateway service as this has no effect on the + //behaviour of network. 
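            // EDITOR'S NOTE (worked example, illustrative only): if the network currently uses
            // {Dhcp, Firewall, Vpn, Gateway} and the new offering provides only {Dhcp, Firewall},
            // this method returns [Vpn]; Gateway is skipped because dropping it does not change
            // how the network behaves.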
+ if(!serviceVO.getService().equalsIgnoreCase(Service.Gateway.getName())) + servicesNotInNewOffering.add(serviceVO.getService()); + } + } + return servicesNotInNewOffering; + } + + @Override + public void cleanupConfigForServicesInNetwork(List services, final Network network){ + long networkId=network.getId(); + Account caller=_accountDao.findById(Account.ACCOUNT_ID_SYSTEM); + long userId=User.UID_SYSTEM; + //remove all PF/Static Nat rules for the network + s_logger.info("Services:"+services+" are no longer supported in network:"+network.getUuid()+ + " after applying new network offering:"+network.getNetworkOfferingId()+" removing the related configuration"); + if(services.contains(Service.StaticNat.getName())|| services.contains(Service.PortForwarding.getName())) { + try { + if (_rulesMgr.revokeAllPFStaticNatRulesForNetwork(networkId, userId, caller)) { + s_logger.debug("Successfully cleaned up portForwarding/staticNat rules for network id=" + networkId); + } else { + s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup"); + } + if(services.contains(Service.StaticNat.getName())){ + //removing static nat configured on ips. + //optimizing the db operations using transaction. + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + List ips = _ipAddressDao.listStaticNatPublicIps(network.getId()); + for (IPAddressVO ip : ips) { + ip.setOneToOneNat(false); + ip.setAssociatedWithVmId(null); + ip.setVmIp(null); + _ipAddressDao.update(ip.getId(),ip); + } + } + }); + } + } catch (ResourceUnavailableException ex) { + s_logger.warn("Failed to release portForwarding/StaticNat rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + } + } + if(services.contains(Service.SourceNat.getName())){ + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + List ips = _ipAddressDao.listByAssociatedNetwork(network.getId(),true); + //removing static nat configured on ips. + for (IPAddressVO ip : ips) { + ip.setSourceNat(false); + _ipAddressDao.update(ip.getId(),ip); + } + } + }); + } + if(services.contains(Service.Lb.getName())){ + //remove all LB rules for the network + if (_lbMgr.removeAllLoadBalanacersForNetwork(networkId, caller, userId)) { + s_logger.debug("Successfully cleaned up load balancing rules for network id=" + networkId); + } else { + s_logger.warn("Failed to cleanup LB rules as a part of network id=" + networkId + " cleanup"); + } + } + + if(services.contains(Service.Firewall.getName())){ + //revoke all firewall rules for the network + try { + if (_firewallMgr.revokeAllFirewallRulesForNetwork(networkId, userId, caller)) { + s_logger.debug("Successfully cleaned up firewallRules rules for network id=" + networkId); + } else { + s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup"); + } + } catch (ResourceUnavailableException ex) { + s_logger.warn("Failed to cleanup Firewall rules as a part of network id=" + networkId + " cleanup due to resourceUnavailable ", ex); + } + } + + //do not remove vpn service for vpc networks. 
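        // EDITOR'S NOTE (inferred): a VPC's remote-access VPN belongs to the VPC rather than to the
        // guest network being updated, hence the vpcId == null guard below; forceCleanup=true makes
        // destroyRemoteAccessVpnForIp() remove the VPN configuration even if the backend command fails.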
+ if(services.contains(Service.Vpn.getName()) && network.getVpcId()==null){ + RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByAccountAndNetwork(network.getAccountId(),networkId); + try { + _vpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, true); + } catch (ResourceUnavailableException ex) { + s_logger.warn("Failed to cleanup remote access vpn resources of network:"+network.getUuid() + " due to Exception: ", ex); + } + } + } + @Override public void configureUpdateInSequence(Network network) { List providers = getNetworkProviders(network.getId()); diff --git a/server/src/com/cloud/network/IpAddressManagerImpl.java b/server/src/com/cloud/network/IpAddressManagerImpl.java index e65adb60f7d5..8a2c3fd7b1f4 100644 --- a/server/src/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/com/cloud/network/IpAddressManagerImpl.java @@ -562,7 +562,7 @@ protected boolean cleanupIpResources(long ipId, long userId, Account caller) { // the code would be triggered s_logger.debug("Cleaning up remote access vpns as a part of public IP id=" + ipId + " release..."); try { - _vpnMgr.destroyRemoteAccessVpnForIp(ipId, caller); + _vpnMgr.destroyRemoteAccessVpnForIp(ipId, caller,false); } catch (ResourceUnavailableException e) { s_logger.warn("Unable to destroy remote access vpn for ip id=" + ipId + " as a part of ip release", e); success = false; diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index bb573decde28..cade54f088c4 100644 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -39,6 +39,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.network.router.VirtualRouter; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ApiConstants; @@ -2002,7 +2003,7 @@ private boolean checkForNonStoppedVmInNetwork(long networkId) { @DB @ActionEvent(eventType = EventTypes.EVENT_NETWORK_UPDATE, eventDescription = "updating network", async = true) public Network updateGuestNetwork(final long networkId, String name, String displayText, Account callerAccount, User callerUser, String domainSuffix, - final Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String customId, boolean updateInSequence) { + final Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String customId, boolean updateInSequence, boolean forced) { boolean restartNetwork = false; // verify input parameters @@ -2248,14 +2249,39 @@ public Network updateGuestNetwork(final long networkId, String name, String disp ReservationContext context = new ReservationContextImpl(null, null, callerUser, callerAccount); // 1) Shutdown all the elements and cleanup all the rules. 
Don't allow to shutdown network in intermediate // states - Shutdown and Implementing - List routers=null; int resourceCount=1; - if(updateInSequence && restartNetwork && _networkOfferingDao.findById(network.getNetworkOfferingId()).getRedundantRouter() && networkOfferingId!=null && _networkOfferingDao.findById(networkOfferingId).getRedundantRouter() && network.getVpcId()==null) { + if(updateInSequence && restartNetwork && _networkOfferingDao.findById(network.getNetworkOfferingId()).getRedundantRouter() + && (networkOfferingId==null || _networkOfferingDao.findById(networkOfferingId).getRedundantRouter()) && network.getVpcId()==null) { _networkMgr.canUpdateInSequence(network); NetworkDetailVO networkDetail =new NetworkDetailVO(network.getId(),Network.updatingInSequence,"true",true); _networkDetailsDao.persist(networkDetail); _networkMgr.configureUpdateInSequence(network); resourceCount=_networkMgr.getResourceCount(network); + //check if routers are in correct state before proceeding with the update + List routers=_routerDao.listByNetworkAndRole(networkId, VirtualRouter.Role.VIRTUAL_ROUTER); + for(DomainRouterVO router :routers){ + if(router.getRedundantState()== VirtualRouter.RedundantState.UNKNOWN){ + if(!forced){ + throw new CloudRuntimeException("Domain router: "+router.getInstanceName()+" is in unknown state, Cannot update network. set parameter forced to true for forcing an update"); + } + } + } + } + List servicesNotInNewOffering = null; + if(networkOfferingId != null) + servicesNotInNewOffering = _networkMgr.getServicesNotSupportedInNewOffering(network,networkOfferingId); + if(!forced && servicesNotInNewOffering != null && !servicesNotInNewOffering.isEmpty()){ + NetworkOfferingVO newOffering = _networkOfferingDao.findById(networkOfferingId); + throw new CloudRuntimeException("The new offering:"+newOffering.getUniqueName() + +" will remove the following services "+servicesNotInNewOffering +"along with all the related configuration currently in use. will not proceed with the network update." 
+ + "set forced parameter to true for forcing an update."); + } + try{ + if(servicesNotInNewOffering!=null && !servicesNotInNewOffering.isEmpty()){ + _networkMgr.cleanupConfigForServicesInNetwork(servicesNotInNewOffering,network); + } + }catch (Throwable e){ + s_logger.debug("failed to cleanup config related to unused services error:"+e.getMessage()); } boolean validStateToShutdown = (network.getState() == Network.State.Implemented || network.getState() == Network.State.Setup || network.getState() == Network.State.Allocated); diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index c2d923cb9512..7b82125dccfb 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -696,6 +696,16 @@ public List getVpcRouters(final long vpcId) { return _routerDao.listByVpcId(vpcId); } + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } + @Override public boolean startRemoteAccessVpn(final RemoteAccessVpn vpn, final VirtualRouter router) throws ResourceUnavailableException { if (router.getState() != State.Running) { diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java index b473f050e040..065c097f0f8b 100644 --- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -281,7 +281,7 @@ private void validateRemoteAccessVpnConfiguration() throws ConfigurationExceptio @Override @DB @ActionEvent(eventType = EventTypes.EVENT_REMOTE_ACCESS_VPN_DESTROY, eventDescription = "removing remote access vpn", async = true) - public boolean destroyRemoteAccessVpnForIp(long ipId, Account caller) throws ResourceUnavailableException { + public boolean destroyRemoteAccessVpnForIp(long ipId, Account caller, final boolean forceCleanup) throws ResourceUnavailableException { final RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipId); if (vpn == null) { s_logger.debug("there are no Remote access vpns for public ip address id=" + ipId); @@ -309,7 +309,7 @@ public boolean destroyRemoteAccessVpnForIp(long ipId, Account caller) throws Res RemoteAccessVpn.State.Running); success = false; } finally { - if (success) { + if (success|| forceCleanup) { //Cleanup corresponding ports final List vpnFwRules = _rulesDao.listByIpAndPurpose(ipId, Purpose.Vpn); @@ -339,7 +339,7 @@ public void doInTransactionWithoutResult(TransactionStatus status) { success = _firewallMgr.applyIngressFirewallRules(ipId, caller); } - if (success) { + if (success|| forceCleanup) { try { Transaction.execute(new TransactionCallbackNoReturn() { @Override diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index 7e80681caa20..880d363b8e6f 100644 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -786,7 +786,7 @@ protected boolean cleanupAccount(AccountVO account, long callerUserId, Account c try { for (RemoteAccessVpnVO vpn : remoteAccessVpns) { - _remoteAccessVpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller); + _remoteAccessVpnMgr.destroyRemoteAccessVpnForIp(vpn.getServerAddressId(), caller, false); } } catch 
(ResourceUnavailableException ex) { s_logger.warn("Failed to cleanup remote access vpn resources as a part of account id=" + accountId + " cleanup due to Exception: ", ex); diff --git a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java index 3e80865572b6..6d2348f97d85 100644 --- a/server/test/com/cloud/vpc/MockNetworkManagerImpl.java +++ b/server/test/com/cloud/vpc/MockNetworkManagerImpl.java @@ -247,7 +247,7 @@ public IpAddress getIp(long id) { */ @Override public Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, User callerUser, String domainSuffix, - Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID,boolean updateInSequence) { + Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork, String newUUID,boolean updateInSequence, boolean forced) { // TODO Auto-generated method stub return null; } @@ -846,6 +846,16 @@ public boolean canUpdateInSequence(Network network) { return false; } + @Override + public List getServicesNotSupportedInNewOffering(Network network, long newNetworkOfferingId) { + return null; + } + + @Override + public void cleanupConfigForServicesInNetwork(List services, Network network) { + return; + } + @Override public void configureUpdateInSequence(Network network) { return; From 5f4439007c22798b80186b13cd1dbb2d29492224 Mon Sep 17 00:00:00 2001 From: Bharat Kumar Date: Tue, 22 Dec 2015 23:51:31 +0530 Subject: [PATCH 025/687] Added a test to test update router in sequence --- .../component/maint/test_redundant_router.py | 154 ++++++++++++++++++ 1 file changed, 154 insertions(+) diff --git a/test/integration/component/maint/test_redundant_router.py b/test/integration/component/maint/test_redundant_router.py index 98b0bae1ba09..cad1d341d55b 100644 --- a/test/integration/component/maint/test_redundant_router.py +++ b/test/integration/component/maint/test_redundant_router.py @@ -29,6 +29,7 @@ get_zone, get_process_status) import time +import multiprocessing # Import Local Modules from marvin.cloudstackTestCase import cloudstackTestCase @@ -872,9 +873,16 @@ def setUpClass(cls): cls.testdata["nw_off_isolated_RVR"], conservemode=True ) + cls.network_offering_for_update=NetworkOffering.create( + cls.api_client, + cls.testdata["nw_off_isolated_RVR"], + conservemode=True + ) + cls._cleanup.append(cls.network_offering_for_update) cls._cleanup.append(cls.network_offering) # Enable Network offering cls.network_offering.update(cls.api_client, state='Enabled') + cls.network_offering_for_update.update(cls.api_client, state='Enabled') return @classmethod @@ -1511,3 +1519,149 @@ def test_05_stopBackupRvR_startInstance(self): "Redundant state of the router should be BACKUP but is %s" % routers[0].redundantstate) return + + def updateNetwork(self, conn): + try: + self.network.update( + self.api_client, + networkofferingid=self.network_offering_for_update.id, + updateinsequence=True, + forced=True, + changecidr=False + ) + except Exception as e: + conn.send("Failed to update network: %s due to %s"%(self.network.name, e)) + conn.send("update Network Complete") + return + + + + def get_master_and_backupRouter(self): + retry = 4 + master_router = backup_router=None + while retry > 0: + routers = Router.list( + self.apiclient, + networkid=self.network.id, + listall=True + ) + retry = retry-1 + if not (routers[0].redundantstate == 'MASTER' or routers[1].redundantstate == 'MASTER'): + continue; + if 
routers[0].redundantstate == 'MASTER': + master_router = routers[0] + backup_router = routers[1] + break + else: + master_router = routers[1] + backup_router = routers[0] + break + return master_router, backup_router + + + def chek_for_new_backupRouter(self,old_backup_router): + master_router, backup_router = self.get_master_and_backupRouter() + retry = 4 + self.info("Checking if new router is getting created.") + self.info("old_backup_router:"+old_backup_router.name+" new_backup_router:"+backup_router.name) + while old_backup_router.name == backup_router.name: + self.debug("waiting for new router old router:"+backup_router.name) + retry = retry-1 + if retry == 0: + break; + time.sleep(self.testdata["sleep"]) + master_router, backup_router = self.get_master_and_backupRouter() + if retry == 0: + self.fail("New router creation taking too long, timed out") + + def wait_untill_router_stabilises(self): + retry=4 + while retry > 0: + routers = Router.list( + self.apiclient, + networkid=self.network.id, + listall=True + ) + retry = retry-1 + self.info("waiting untill state of the routers is stable") + if routers[0].redundantstate != 'UNKNOWN' and routers[1].redundantstate != 'UNKNOWN': + return + elif retry==0: + self.fail("timedout while waiting for routers to stabilise") + return + time.sleep(self.testdata["sleep"]) + + @attr(tags=["bharat"]) + def test_06_updateVRs_in_sequence(self): + """Test update network and check if VRs are updated in sequence + """ + + # Steps to validate + # update network to a new offering + # check if the master router is running while backup is starting. + # check if the backup is running while master is starting. + # check if both the routers are running after the update is complete. + + #clean up the network to make sure it is in proper state. + self.network.restart(self.apiclient,cleanup=True) + time.sleep(self.testdata["sleep"]) + self.wait_untill_router_stabilises() + old_master_router, old_backup_router = self.get_master_and_backupRouter() + self.info("old_master_router:"+old_master_router.name+" old_backup_router"+old_backup_router.name) + #chek if the network is in correct state + self.assertEqual(old_master_router.state, "Running", "The master router is not running, network is not in a correct state to start the test") + self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test") + + worker, monitor = multiprocessing.Pipe() + worker_process = multiprocessing.Process(target=self.updateNetwork, args=(worker,)) + worker_process.start() + if not worker_process.is_alive(): + message = monitor.recv() + if "Complete" not in message: + self.fail(message) + + self.info("Network update Started, the old backup router will get destroyed and a new router will be created") + + self.chek_for_new_backupRouter(old_backup_router) + master_router, new_backup_router=self.get_master_and_backupRouter() + #the state of the master router should be running. while backup is being updated + self.assertEqual(master_router.state, "Running", "State of the master router is not running") + self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s"%master_router.redundantstate) + self.info("Old backup router:"+old_backup_router.name+" is destroyed and new router:"+new_backup_router.name+" got created") + + #wait for the new backup to become master. 
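        # EDITOR'S NOTE (expected router sequence during an in-sequence update, per this test's design):
        #   1. the old BACKUP router is destroyed and redeployed with the new offering,
        #   2. the redeployed router takes over as MASTER,
        #   3. the old MASTER is destroyed in turn and comes back as BACKUP.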
+        retry = 4
+        while new_backup_router.name != master_router.name:
+            retry = retry - 1
+            if retry == 0:
+                break
+            time.sleep(self.testdata["sleep"])
+            self.info("waiting for the backup router to become the master router, name:" + new_backup_router.name)
+            master_router, backup_router = self.get_master_and_backupRouter()
+        if retry == 0:
+            self.fail("timed out while waiting for the new backup router to change state to MASTER.")
+
+        # the new backup router has become master.
+        self.info("newly created router:" + new_backup_router.name + " has changed state to Master")
+        self.info("old master router:" + old_master_router.name + " is destroyed")
+        # the old master will get destroyed and a new backup will be created.
+        # wait until the new backup changes state from UNKNOWN to BACKUP
+        master_router, backup_router = self.get_master_and_backupRouter()
+        retry = 4
+        while backup_router.redundantstate != 'BACKUP':
+            retry = retry - 1
+            self.info("waiting for router:" + backup_router.name + " to change state to Backup")
+            if retry == 0:
+                break
+            time.sleep(self.testdata["sleep"])
+            master_router, backup_router = self.get_master_and_backupRouter()
+        self.assertEqual(master_router.state, "Running", "The master router is not in the Running state")
+        self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s" % master_router.redundantstate)
+        if retry == 0:
+            self.fail("timed out while waiting for the new backup router to change state to BACKUP.")
+
+        # the network update is complete; finally, both routers should be running.
+        new_master_router, new_backup_router = self.get_master_and_backupRouter()
+        self.assertEqual(new_master_router.state, "Running", "The master router:" + new_master_router.name + " is not in the Running state")
+        self.assertEqual(new_backup_router.state, "Running", "The backup router:" + new_backup_router.name + " is not in the Running state")
+        worker_process.join()

From 8805f955a33fcf5b71997fb534314c7608a3db4f Mon Sep 17 00:00:00 2001
From: Bharat Kumar
Date: Mon, 12 Sep 2016 15:34:07 +0530
Subject: [PATCH 026/687] Added license headers

---
 .../cloud/network/element/RedundantResource.java | 16 ++++++++++++++++
 setup/db/db/schema-452to460.sql                  |  2 +-
 setup/db/db/schema-4910to41000.sql               |  2 ++
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/api/src/com/cloud/network/element/RedundantResource.java b/api/src/com/cloud/network/element/RedundantResource.java
index 863c9cd330c5..39b6b97d73a6 100644
--- a/api/src/com/cloud/network/element/RedundantResource.java
+++ b/api/src/com/cloud/network/element/RedundantResource.java
@@ -1,3 +1,19 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
package com.cloud.network.element; import com.cloud.network.Network; diff --git a/setup/db/db/schema-452to460.sql b/setup/db/db/schema-452to460.sql index e05ad6d255db..3b380f397e66 100644 --- a/setup/db/db/schema-452to460.sql +++ b/setup/db/db/schema-452to460.sql @@ -420,5 +420,5 @@ INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervis INSERT IGNORE INTO `cloud`.`guest_os_hypervisor` (uuid,hypervisor_type, hypervisor_version, guest_os_name, guest_os_id, created, is_user_defined) VALUES (UUID(),'KVM', 'default', 'CentOS 7', 246, utc_timestamp(), 0); UPDATE `cloud`.`hypervisor_capabilities` SET `max_data_volumes_limit` = '32' WHERE `hypervisor_capabilities`.`hypervisor_type` = 'KVM'; -ALTER TABLE `cloud`.`domain_router` ADD COLUMN update_state varchar(64) DEFAULT NULL; + diff --git a/setup/db/db/schema-4910to41000.sql b/setup/db/db/schema-4910to41000.sql index f18dbf3f49a5..8ea67e196bdc 100644 --- a/setup/db/db/schema-4910to41000.sql +++ b/setup/db/db/schema-4910to41000.sql @@ -18,3 +18,5 @@ --; -- Schema upgrade from 4.9.1.0 to 4.10.0.0; --; + +ALTER TABLE `cloud`.`domain_router` ADD COLUMN update_state varchar(64) DEFAULT NULL; \ No newline at end of file From b508fb8692eac1675a4597c9dfaef463304aecba Mon Sep 17 00:00:00 2001 From: Mike Tutkowski Date: Sat, 20 Aug 2016 17:58:30 -0600 Subject: [PATCH 027/687] Adding support for cross-cluster storage migration for managed storage when using XenServer --- api/src/com/cloud/storage/StoragePool.java | 2 + .../api/MigrateWithStorageReceiveCommand.java | 11 +- .../agent/test/BackupSnapshotCommandTest.java | 3 + .../agent/test/CheckNetworkAnswerTest.java | 3 + .../api/agent/test/SnapshotCommandTest.java | 3 + .../api/storage/PrimaryDataStoreDriver.java | 7 + .../cloud/vm/VirtualMachineManagerImpl.java | 88 ++- .../storage/datastore/db/StoragePoolVO.java | 1 + ...grateWithStorageReceiveCommandWrapper.java | 13 +- ...CitrixCreateStoragePoolCommandWrapper.java | 31 +- ...CitrixDeleteStoragePoolCommandWrapper.java | 30 +- .../XenServerStorageMotionStrategy.java | 222 +++++- .../xenbase/XenServer610WrapperTest.java | 6 +- .../SolidFirePrimaryDataStoreDriver.java | 190 ++++- .../cloud/server/ManagementServerImpl.java | 23 +- .../plugins/solidfire/TestAddRemoveHosts.py | 58 +- .../plugins/solidfire/TestSnapshots.py | 580 +++++++++++---- .../solidfire/TestVMMigrationWithStorage.py | 697 ++++++++++++++++++ .../plugins/solidfire/TestVMSnapshots.py | 74 +- .../plugins/solidfire/TestVolumes.py | 548 +++++--------- .../plugins/solidfire/util/sf_util.py | 217 ++++++ 21 files changed, 2083 insertions(+), 724 deletions(-) create mode 100644 test/integration/plugins/solidfire/TestVMMigrationWithStorage.py create mode 100644 test/integration/plugins/solidfire/util/sf_util.py diff --git a/api/src/com/cloud/storage/StoragePool.java b/api/src/com/cloud/storage/StoragePool.java index 8e03c3348f3a..3a2d3bd8feec 100644 --- a/api/src/com/cloud/storage/StoragePool.java +++ b/api/src/com/cloud/storage/StoragePool.java @@ -104,4 +104,6 @@ public interface StoragePool extends Identity, InternalIdentity { boolean isInMaintenance(); Hypervisor.HypervisorType getHypervisor(); + + boolean isManaged(); } diff --git a/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java b/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java index 66aecdbddca1..3d413fc00454 100644 --- a/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java +++ b/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java @@ -21,26 +21,25 @@ 
import java.util.List; -import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.utils.Pair; public class MigrateWithStorageReceiveCommand extends Command { VirtualMachineTO vm; - List> volumeToFiler; + List> volumeToStorageUuid; - public MigrateWithStorageReceiveCommand(VirtualMachineTO vm, List> volumeToFiler) { + public MigrateWithStorageReceiveCommand(VirtualMachineTO vm, List> volumeToStorageUuid) { this.vm = vm; - this.volumeToFiler = volumeToFiler; + this.volumeToStorageUuid = volumeToStorageUuid; } public VirtualMachineTO getVirtualMachine() { return vm; } - public List> getVolumeToFiler() { - return volumeToFiler; + public List> getVolumeToStorageUuid() { + return volumeToStorageUuid; } @Override diff --git a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java index bdcda38f3f06..edc90aab770b 100644 --- a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java +++ b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java @@ -134,6 +134,9 @@ public int getPort() { return 25; }; + @Override + public boolean isManaged() { return false; } + @Override public Long getPodId() { return 0L; diff --git a/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java b/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java index d6f0bfc3b189..4d49c99ee900 100644 --- a/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java +++ b/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java @@ -173,6 +173,9 @@ public int getPort() { return 25; }; + @Override + public boolean isManaged() { return false; } + @Override public Long getPodId() { return 0L; diff --git a/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java index 629669abdd08..576419ab652a 100644 --- a/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java +++ b/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java @@ -135,6 +135,9 @@ public int getPort() { return 25; }; + @Override + public boolean isManaged() { return false; } + @Override public Long getPodId() { return 0L; diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index 6dcdf4f0c7ca..8749589f12c2 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -25,6 +25,13 @@ import com.cloud.storage.StoragePool; public interface PrimaryDataStoreDriver extends DataStoreDriver { + String BASIC_CREATE = "basicCreate"; + String BASIC_DELETE = "basicDelete"; + String BASIC_DELETE_FAILURE = "basicDeleteFailure"; + String BASIC_GRANT_ACCESS = "basicGrantAccess"; + String BASIC_REVOKE_ACCESS = "basicRevokeAccess"; + String BASIC_IQN = "basicIqn"; + ChapInfo getChapInfo(DataObject dataObject); boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore); diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 9523b928ed4d..a4c98899e5db 100644 --- 
a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -2045,62 +2045,74 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
 
     private Map<Volume, StoragePool> getPoolListForVolumesForMigration(final VirtualMachineProfile profile, final Host host, final Map<Long, Long> volumeToPool) {
         final List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
-        final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<Volume, StoragePool>();
+        final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
+
         for (final VolumeVO volume : allVolumes) {
             final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
-            final StoragePoolVO pool = _storagePoolDao.findById(poolId);
+            final StoragePoolVO destPool = _storagePoolDao.findById(poolId);
             final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
             final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
-            if (pool != null) {
+
+            if (destPool != null) {
                 // Check if pool is accessible from the destination host and disk offering with which the volume was
                 // created is compliant with the pool type.
-                if (_poolHostDao.findByPoolHost(pool.getId(), host.getId()) == null || pool.isLocal() != diskOffering.getUseLocalStorage()) {
+                if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
                     // Cannot find a pool for the volume. Throw an exception.
-                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + pool + " while migrating vm to host " + host +
+                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host +
                             ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " +
                             "the given pool.");
-                } else if (pool.getId() == currentPool.getId()) {
-                    // If the pool to migrate too is the same as current pool, the volume doesn't need to be migrated.
+                } else if (destPool.getId() == currentPool.getId()) {
+                    // If the pool to migrate to is the same as current pool, the volume doesn't need to be migrated.
                 } else {
-                    volumeToPoolObjectMap.put(volume, pool);
+                    volumeToPoolObjectMap.put(volume, destPool);
                 }
             } else {
-                // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
-                final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
-                final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
-                final ExcludeList avoid = new ExcludeList();
-                boolean currentPoolAvailable = false;
-
-                final List<StoragePool> poolList = new ArrayList<StoragePool>();
-                for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
-                    final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
-                    if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
-                        poolList.addAll(poolListFromAllocator);
-                    }
-                }
+                if (currentPool.isManaged()) {
+                    volumeToPoolObjectMap.put(volume, currentPool);
+                } else {
+                    // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
 
-                if (poolList != null && !poolList.isEmpty()) {
-                    // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
-                    // volume to a pool only if it is required; that is the current pool on which the volume resides
-                    // is not available on the destination host.
-                    final Iterator<StoragePool> iter = poolList.iterator();
-                    while (iter.hasNext()) {
-                        if (currentPool.getId() == iter.next().getId()) {
-                            currentPoolAvailable = true;
-                            break;
+                    final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
+                    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
+
+                    final List<StoragePool> poolList = new ArrayList<>();
+                    final ExcludeList avoid = new ExcludeList();
+
+                    for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
+                        final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
+
+                        if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
+                            poolList.addAll(poolListFromAllocator);
                         }
                     }
 
-                    if (!currentPoolAvailable) {
-                        volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
-                    }
-                }
+                    boolean currentPoolAvailable = false;
 
-                if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
-                    // Cannot find a pool for the volume. Throw an exception.
-                    throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " +
-                            profile.getVirtualMachine() + " to host " + host);
+                    if (poolList != null && !poolList.isEmpty()) {
+                        // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
+                        // volume to a pool only if it is required; that is the current pool on which the volume resides
+                        // is not available on the destination host.
+
+                        final Iterator<StoragePool> iter = poolList.iterator();
+
+                        while (iter.hasNext()) {
+                            if (currentPool.getId() == iter.next().getId()) {
+                                currentPoolAvailable = true;
+
+                                break;
+                            }
+                        }
+
+                        if (!currentPoolAvailable) {
+                            volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
+                        }
+                    }
+
+                    if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
+                        // Cannot find a pool for the volume. Throw an exception.
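+                        // This happens when the volume's current pool is not available on the
+                        // destination host and the allocators did not return any usable pool.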
+ throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " + + profile.getVirtualMachine() + " to host " + host); + } } } } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java index ad2ad4187702..24fcaa03f56c 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java @@ -231,6 +231,7 @@ public void setManaged(boolean managed) { this.managed = managed; } + @Override public boolean isManaged() { return managed; } diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java index 046a4253404a..fdcb7b5ffbef 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java @@ -31,7 +31,6 @@ import com.cloud.agent.api.MigrateWithStorageReceiveAnswer; import com.cloud.agent.api.MigrateWithStorageReceiveCommand; import com.cloud.agent.api.to.NicTO; -import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.hypervisor.xenserver.resource.XenServer610Resource; @@ -56,7 +55,7 @@ public final class XenServer610MigrateWithStorageReceiveCommandWrapper extends C public Answer execute(final MigrateWithStorageReceiveCommand command, final XenServer610Resource xenServer610Resource) { final Connection connection = xenServer610Resource.getConnection(); final VirtualMachineTO vmSpec = command.getVirtualMachine(); - final List> volumeToFiler = command.getVolumeToFiler(); + final List> volumeToStorageUuid = command.getVolumeToStorageUuid(); try { // In a cluster management server setup, the migrate with storage receive and send @@ -69,10 +68,12 @@ public Answer execute(final MigrateWithStorageReceiveCommand command, final XenS // storage send command execution. Gson gson = new Gson(); // Get a map of all the SRs to which the vdis will be migrated. 
- final List> volumeToSr = new ArrayList>(); - for (final Pair entry : volumeToFiler) { - final StorageFilerTO storageFiler = entry.second(); - final SR sr = xenServer610Resource.getStorageRepository(connection, storageFiler.getUuid()); + final List> volumeToSr = new ArrayList<>(); + + for (final Pair entry : volumeToStorageUuid) { + final String storageUuid = entry.second(); + final SR sr = xenServer610Resource.getStorageRepository(connection, storageUuid); + volumeToSr.add(new Pair(entry.first(), sr)); } diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java index bed417f35e35..7b2a599d3c0b 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java @@ -19,6 +19,8 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; +import java.util.Map; + import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; @@ -39,20 +41,35 @@ public final class CitrixCreateStoragePoolCommandWrapper extends CommandWrapper< public Answer execute(final CreateStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) { final Connection conn = citrixResourceBase.getConnection(); final StorageFilerTO pool = command.getPool(); + try { - if (pool.getType() == StoragePoolType.NetworkFilesystem) { - citrixResourceBase.getNfsSR(conn, Long.toString(pool.getId()), pool.getUuid(), pool.getHost(), pool.getPath(), pool.toString()); - } else if (pool.getType() == StoragePoolType.IscsiLUN) { - citrixResourceBase.getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, false); - } else if (pool.getType() == StoragePoolType.PreSetup) { - } else { - return new Answer(command, false, "The pool type: " + pool.getType().name() + " is not supported."); + if (command.getCreateDatastore()) { + Map details = command.getDetails(); + + String srNameLabel = details.get(CreateStoragePoolCommand.DATASTORE_NAME); + String storageHost = details.get(CreateStoragePoolCommand.STORAGE_HOST); + String iqn = details.get(CreateStoragePoolCommand.IQN); + + citrixResourceBase.getIscsiSR(conn, srNameLabel, storageHost, iqn, null, null, false); } + else { + if (pool.getType() == StoragePoolType.NetworkFilesystem) { + citrixResourceBase.getNfsSR(conn, Long.toString(pool.getId()), pool.getUuid(), pool.getHost(), pool.getPath(), pool.toString()); + } else if (pool.getType() == StoragePoolType.IscsiLUN) { + citrixResourceBase.getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, false); + } else if (pool.getType() == StoragePoolType.PreSetup) { + } else { + return new Answer(command, false, "The pool type: " + pool.getType().name() + " is not supported."); + } + } + return new Answer(command, true, "success"); } catch (final Exception e) { final String msg = "Catch Exception " + e.getClass().getName() + ", create StoragePool failed due to " + e.toString() + " on host:" + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath(); + s_logger.warn(msg, e); + return new Answer(command, false, msg); } } diff --git 
a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java index a9ae680029fe..c93dd902034d 100644 --- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java +++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java @@ -19,6 +19,8 @@ package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase; +import java.util.Map; + import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; @@ -32,22 +34,40 @@ @ResourceWrapper(handles = DeleteStoragePoolCommand.class) public final class CitrixDeleteStoragePoolCommandWrapper extends CommandWrapper { - private static final Logger s_logger = Logger.getLogger(CitrixDeleteStoragePoolCommandWrapper.class); @Override public Answer execute(final DeleteStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) { final Connection conn = citrixResourceBase.getConnection(); final StorageFilerTO poolTO = command.getPool(); + try { - final SR sr = citrixResourceBase.getStorageRepository(conn, poolTO.getUuid()); + final SR sr; + + // getRemoveDatastore being true indicates we are using managed storage and need to pull the SR name out of a Map + // instead of pulling it out using getUuid of the StorageFilerTO instance. + if (command.getRemoveDatastore()) { + Map details = command.getDetails(); + + String srNameLabel = details.get(DeleteStoragePoolCommand.DATASTORE_NAME); + + sr = citrixResourceBase.getStorageRepository(conn, srNameLabel); + } + else { + sr = citrixResourceBase.getStorageRepository(conn, poolTO.getUuid()); + } + citrixResourceBase.removeSR(conn, sr); + final Answer answer = new Answer(command, true, "success"); + return answer; } catch (final Exception e) { - final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + poolTO.getHost() - + poolTO.getPath(); - s_logger.warn(msg, e); + final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + + " pool: " + poolTO.getHost() + poolTO.getPath(); + + s_logger.error(msg, e); + return new Answer(command, false, msg); } } diff --git a/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java b/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java index 7de96b0a9e11..2409b6e2e69b 100644 --- a/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java +++ b/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.storage.motion; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -28,6 +29,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import 
org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; @@ -39,6 +41,8 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CreateStoragePoolCommand; +import com.cloud.agent.api.DeleteStoragePoolCommand; import com.cloud.agent.api.MigrateWithStorageAnswer; import com.cloud.agent.api.MigrateWithStorageCommand; import com.cloud.agent.api.MigrateWithStorageCompleteAnswer; @@ -56,9 +60,12 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.StoragePool; import com.cloud.storage.VolumeVO; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; @@ -74,6 +81,8 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { @Inject PrimaryDataStoreDao storagePoolDao; @Inject + private VolumeDetailsDao volumeDetailsDao; + @Inject VMInstanceDao instanceDao; @Override @@ -120,25 +129,175 @@ public void copyAsync(Map volumeMap, VirtualMachineTO vmT callback.complete(result); } + private String getBasicIqn(long volumeId) { + VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, PrimaryDataStoreDriver.BASIC_IQN); + + return volumeDetail.getValue(); + } + + /** + * Tell the underlying storage plug-in to create a new volume, put it in the VAG of the destination cluster, and + * send a command to the destination cluster to create an SR and to attach to the SR from all hosts in the cluster. + */ + private String handleManagedVolumePreMigration(VolumeInfo volumeInfo, StoragePool storagePool, Host destHost) { + final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver(); + + VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_CREATE, Boolean.TRUE.toString(), false); + + volumeDetailsDao.persist(volumeDetailVo); + + pdsd.createAsync(volumeInfo.getDataStore(), volumeInfo, null); + + volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_GRANT_ACCESS, Boolean.TRUE.toString(), false); + + volumeDetailsDao.persist(volumeDetailVo); + + pdsd.grantAccess(volumeInfo, destHost, volumeInfo.getDataStore()); + + final Map details = new HashMap<>(); + + final String iqn = getBasicIqn(volumeInfo.getId()); + + details.put(CreateStoragePoolCommand.DATASTORE_NAME, iqn); + + details.put(CreateStoragePoolCommand.IQN, iqn); + + details.put(CreateStoragePoolCommand.STORAGE_HOST, storagePool.getHostAddress()); + + details.put(CreateStoragePoolCommand.STORAGE_PORT, String.valueOf(storagePool.getPort())); + + final CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, storagePool); + + cmd.setDetails(details); + cmd.setCreateDatastore(true); + + final Answer answer = agentMgr.easySend(destHost.getId(), cmd); + + if (answer == null || !answer.getResult()) { + String errMsg = "Error interacting with host (related to CreateStoragePoolCommand)" + + (StringUtils.isNotBlank(answer.getDetails()) ? 
": " + answer.getDetails() : ""); + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg); + } + + return iqn; + } + + private void handleManagedVolumePostMigration(VolumeInfo volumeInfo, Host srcHost, VolumeObjectTO volumeTO) { + final Map details = new HashMap<>(); + + details.put(DeleteStoragePoolCommand.DATASTORE_NAME, volumeInfo.get_iScsiName()); + + final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(); + + cmd.setDetails(details); + cmd.setRemoveDatastore(true); + + final Answer answer = agentMgr.easySend(srcHost.getId(), cmd); + + if (answer == null || !answer.getResult()) { + String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" + + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + + s_logger.error(errMsg); + + throw new CloudRuntimeException(errMsg); + } + + final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver(); + + pdsd.revokeAccess(volumeInfo, srcHost, volumeInfo.getDataStore()); + + VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_DELETE, Boolean.TRUE.toString(), false); + + volumeDetailsDao.persist(volumeDetailVo); + + pdsd.deleteAsync(volumeInfo.getDataStore(), volumeInfo, null); + + VolumeVO volumeVO = volDao.findById(volumeInfo.getId()); + + volumeVO.setPath(volumeTO.getPath()); + + volDao.update(volumeVO.getId(), volumeVO); + } + + private void handleManagedVolumesAfterFailedMigration(Map volumeToPool, Host destHost) { + for (Map.Entry entry : volumeToPool.entrySet()) { + VolumeInfo volumeInfo = entry.getKey(); + StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId()); + + if (storagePool.isManaged()) { + final Map details = new HashMap<>(); + + details.put(DeleteStoragePoolCommand.DATASTORE_NAME, getBasicIqn(volumeInfo.getId())); + + final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(); + + cmd.setDetails(details); + cmd.setRemoveDatastore(true); + + final Answer answer = agentMgr.easySend(destHost.getId(), cmd); + + if (answer == null || !answer.getResult()) { + String errMsg = "Error interacting with host (related to handleManagedVolumesAfterFailedMigration)" + + (StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : ""); + + s_logger.error(errMsg); + + // no need to throw an exception here as the calling code is responsible for doing so + // regardless of the success or lack thereof concerning this method + return; + } + + final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver(); + + VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_REVOKE_ACCESS, Boolean.TRUE.toString(), false); + + volumeDetailsDao.persist(volumeDetailVo); + + pdsd.revokeAccess(volumeInfo, destHost, volumeInfo.getDataStore()); + + volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_DELETE_FAILURE, Boolean.TRUE.toString(), false); + + volumeDetailsDao.persist(volumeDetailVo); + + pdsd.deleteAsync(volumeInfo.getDataStore(), volumeInfo, null); + } + } + } + private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map volumeToPool) throws AgentUnavailableException { + // Initiate migration of a virtual machine with its volumes. - // Initiate migration of a virtual machine with it's volumes. 
try { - List> volumeToFilerto = new ArrayList>(); + List> volumeToStorageUuid = new ArrayList<>(); + for (Map.Entry entry : volumeToPool.entrySet()) { - VolumeInfo volume = entry.getKey(); - VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId())); - StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue()); - volumeToFilerto.add(new Pair(volumeTo, filerTo)); + VolumeInfo volumeInfo = entry.getKey(); + StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId()); + VolumeTO volumeTo = new VolumeTO(volumeInfo, storagePool); + + if (storagePool.isManaged()) { + String iqn = handleManagedVolumePreMigration(volumeInfo, storagePool, destHost); + + volumeToStorageUuid.add(new Pair<>(volumeTo, iqn)); + } + else { + volumeToStorageUuid.add(new Pair<>(volumeTo, ((StoragePool)entry.getValue()).getPath())); + } } // Migration across cluster needs to be done in three phases. // 1. Send a migrate receive command to the destination host so that it is ready to receive a vm. // 2. Send a migrate send command to the source host. This actually migrates the vm to the destination. // 3. Complete the process. Update the volume details. - MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(to, volumeToFilerto); + + MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(to, volumeToStorageUuid); MigrateWithStorageReceiveAnswer receiveAnswer = (MigrateWithStorageReceiveAnswer)agentMgr.send(destHost.getId(), receiveCmd); + if (receiveAnswer == null) { s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); @@ -150,16 +309,22 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine MigrateWithStorageSendCommand sendCmd = new MigrateWithStorageSendCommand(to, receiveAnswer.getVolumeToSr(), receiveAnswer.getNicToNetwork(), receiveAnswer.getToken()); MigrateWithStorageSendAnswer sendAnswer = (MigrateWithStorageSendAnswer)agentMgr.send(srcHost.getId(), sendCmd); + if (sendAnswer == null) { + handleManagedVolumesAfterFailedMigration(volumeToPool, destHost); + s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else if (!sendAnswer.getResult()) { + handleManagedVolumesAfterFailedMigration(volumeToPool, destHost); + s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + sendAnswer.getDetails()); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } MigrateWithStorageCompleteCommand command = new MigrateWithStorageCompleteCommand(to); MigrateWithStorageCompleteAnswer answer = (MigrateWithStorageCompleteAnswer)agentMgr.send(destHost.getId(), command); + if (answer == null) { s_logger.error("Migration with storage of vm " + vm + " failed."); throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); @@ -168,7 +333,7 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost); } else { // Update the volume details after migration. 
- updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos()); + updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos(), srcHost); } return answer; @@ -181,7 +346,7 @@ private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachine private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map volumeToPool) throws AgentUnavailableException { - // Initiate migration of a virtual machine with it's volumes. + // Initiate migration of a virtual machine with its volumes. try { List> volumeToFilerto = new ArrayList>(); for (Map.Entry entry : volumeToPool.entrySet()) { @@ -201,7 +366,7 @@ private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachine throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + answer.getDetails()); } else { // Update the volume details after migration. - updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos()); + updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos(), srcHost); } return answer; @@ -211,28 +376,39 @@ private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachine } } - private void updateVolumePathsAfterMigration(Map volumeToPool, List volumeTos) { + private void updateVolumePathsAfterMigration(Map volumeToPool, List volumeTos, Host srcHost) { for (Map.Entry entry : volumeToPool.entrySet()) { + VolumeInfo volumeInfo = entry.getKey(); + StoragePool storagePool = (StoragePool)entry.getValue(); + boolean updated = false; - VolumeInfo volume = entry.getKey(); - StoragePool pool = (StoragePool)entry.getValue(); + for (VolumeObjectTO volumeTo : volumeTos) { - if (volume.getId() == volumeTo.getId()) { - VolumeVO volumeVO = volDao.findById(volume.getId()); - Long oldPoolId = volumeVO.getPoolId(); - volumeVO.setPath(volumeTo.getPath()); - volumeVO.setFolder(pool.getPath()); - volumeVO.setPodId(pool.getPodId()); - volumeVO.setPoolId(pool.getId()); - volumeVO.setLastPoolId(oldPoolId); - volDao.update(volume.getId(), volumeVO); + if (volumeInfo.getId() == volumeTo.getId()) { + if (storagePool.isManaged()) { + handleManagedVolumePostMigration(volumeInfo, srcHost, volumeTo); + } + else { + VolumeVO volumeVO = volDao.findById(volumeInfo.getId()); + Long oldPoolId = volumeVO.getPoolId(); + + volumeVO.setPath(volumeTo.getPath()); + volumeVO.setFolder(storagePool.getPath()); + volumeVO.setPodId(storagePool.getPodId()); + volumeVO.setPoolId(storagePool.getId()); + volumeVO.setLastPoolId(oldPoolId); + + volDao.update(volumeInfo.getId(), volumeVO); + } + updated = true; + break; } } if (!updated) { - s_logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated."); + s_logger.error("The volume path wasn't updated for volume '" + volumeInfo + "' after it was migrated."); } } } diff --git a/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java b/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java index f294af118fce..8fa68f58c81b 100644 --- a/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java +++ b/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java @@ -204,9 +204,9 @@ public void testMigrateWithStorageReceiveCommand() { final StorageFilerTO storage1 = 
Mockito.mock(StorageFilerTO.class); final StorageFilerTO storage2 = Mockito.mock(StorageFilerTO.class); - final List> volumeToFiler = new ArrayList>(); - volumeToFiler.add(new Pair(vol1, storage1)); - volumeToFiler.add(new Pair(vol2, storage2)); + final List> volumeToFiler = new ArrayList<>(); + volumeToFiler.add(new Pair<>(vol1, storage1.getPath())); + volumeToFiler.add(new Pair<>(vol2, storage2.getPath())); final NicTO nicTO1 = Mockito.mock(NicTO.class); final NicTO nicTO2 = Mockito.mock(NicTO.class); diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index af969e168af2..ccc1bdcd8cf9 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -94,6 +94,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { private static final long MIN_IOPS_FOR_SNAPSHOT_VOLUME = 100L; private static final long MAX_IOPS_FOR_SNAPSHOT_VOLUME = 20000L; + private static final String BASIC_SF_ID = "basicSfId"; + @Inject private AccountDao accountDao; @Inject private AccountDetailsDao accountDetailsDao; @Inject private ClusterDao clusterDao; @@ -153,7 +155,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore Preconditions.checkArgument(host != null, "'host' should not be 'null'"); Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'"); - long sfVolumeId = getSolidFireVolumeId(dataObject); + long sfVolumeId = getSolidFireVolumeId(dataObject, true); long clusterId = host.getClusterId(); long storagePoolId = dataStore.getId(); @@ -215,7 +217,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) return; } - long sfVolumeId = getSolidFireVolumeId(dataObject); + long sfVolumeId = getSolidFireVolumeId(dataObject, false); long clusterId = host.getClusterId(); long storagePoolId = dataStore.getId(); @@ -252,9 +254,31 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) } } - private long getSolidFireVolumeId(DataObject dataObject) { + private long getSolidFireVolumeId(DataObject dataObject, boolean grantAccess) { if (dataObject.getType() == DataObjectType.VOLUME) { - return Long.parseLong(((VolumeInfo)dataObject).getFolder()); + final VolumeInfo volumeInfo = (VolumeInfo)dataObject; + final long volumeId = volumeInfo.getId(); + + if (grantAccess && isBasicGrantAccess(volumeId)) { + volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_GRANT_ACCESS); + + final Long sfVolumeId = getBasicSfVolumeId(volumeId); + + Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic grant access)."); + + return sfVolumeId; + } + else if (!grantAccess && isBasicRevokeAccess(volumeId)) { + volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_REVOKE_ACCESS); + + final Long sfVolumeId = getBasicSfVolumeId(volumeId); + + Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic revoke access)."); + + return sfVolumeId; + } + + return Long.parseLong(volumeInfo.getFolder()); } if (dataObject.getType() == DataObjectType.SNAPSHOT) { @@ -271,7 +295,7 @@ private long getSolidFireVolumeId(DataObject dataObject) { return 
getVolumeIdFrom_iScsiPath(((TemplateInfo)dataObject).getInstallPath()); } - throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject)"); + throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject, boolean)"); } private long getVolumeIdFrom_iScsiPath(String iScsiPath) { @@ -313,10 +337,11 @@ private long getDefaultBurstIops(long storagePoolId, long maxIops) { private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, DataObject dataObject, long sfAccountId) { long storagePoolId = dataObject.getDataStore().getId(); - Long minIops = null; - Long maxIops = null; - Long volumeSize = dataObject.getSize(); - String volumeName = null; + + final Long minIops; + final Long maxIops; + final Long volumeSize; + final String volumeName; final Map mapAttributes; @@ -647,6 +672,58 @@ private void removeTempVolumeId(long csSnapshotId) { snapshotDetailsDao.remove(snapshotDetails.getId()); } + private Long getBasicSfVolumeId(long volumeId) { + VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, BASIC_SF_ID); + + if (volumeDetail != null && volumeDetail.getValue() != null) { + return new Long(volumeDetail.getValue()); + } + + return null; + } + + private String getBasicIqn(long volumeId) { + VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, BASIC_IQN); + + if (volumeDetail != null && volumeDetail.getValue() != null) { + return volumeDetail.getValue(); + } + + return null; + } + + // If isBasicCreate returns true, this means the calling code simply wants us to create a SolidFire volume with specified + // characteristics. We do not update the cloud.volumes table with this info. 
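+    // The BASIC_* volume details act as one-shot flags: the calling code persists a detail row
+    // before invoking the driver, and the driver consumes (removes) it when the corresponding
+    // create, delete, grant-access, or revoke-access action is carried out.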
+ private boolean isBasicCreate(long volumeId) { + return getBooleanValueFromVolumeDetails(volumeId, BASIC_CREATE); + } + + private boolean isBasicDelete(long volumeId) { + return getBooleanValueFromVolumeDetails(volumeId, BASIC_DELETE); + } + + private boolean isBasicDeleteFailure(long volumeId) { + return getBooleanValueFromVolumeDetails(volumeId, BASIC_DELETE_FAILURE); + } + + private boolean isBasicGrantAccess(long volumeId) { + return getBooleanValueFromVolumeDetails(volumeId, BASIC_GRANT_ACCESS); + } + + private boolean isBasicRevokeAccess(long volumeId) { + return getBooleanValueFromVolumeDetails(volumeId, BASIC_REVOKE_ACCESS); + } + + private boolean getBooleanValueFromVolumeDetails(long volumeId, String name) { + VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, name); + + if (volumeDetail != null && volumeDetail.getValue() != null) { + return Boolean.parseBoolean(volumeDetail.getValue()); + } + + return false; + } + private long getCsIdForCloning(long volumeId, String cloneOf) { VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, cloneOf); @@ -788,11 +865,13 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet LOGGER.error(errMsg); } - CommandResult result = new CommandResult(); + if (callback != null) { + CommandResult result = new CommandResult(); - result.setResult(errMsg); + result.setResult(errMsg); - callback.complete(result); + callback.complete(result); + } } @Override @@ -950,19 +1029,43 @@ private void updateSnapshotDetails(long csSnapshotId, long sfNewVolumeId, long s snapshotDetailsDao.persist(snapshotDetail); } + private void addBasicCreateInfoToVolumeDetails(long volumeId, SolidFireUtil.SolidFireVolume sfVolume) { + VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeId, BASIC_SF_ID, String.valueOf(sfVolume.getId()), false); + + volumeDetailsDao.persist(volumeDetailVo); + + volumeDetailVo = new VolumeDetailVO(volumeId, BASIC_IQN, sfVolume.getIqn(), false); + + volumeDetailsDao.persist(volumeDetailVo); + } + private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { - verifySufficientBytesForStoragePool(volumeInfo, storagePoolId); - verifySufficientIopsForStoragePool(volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId), storagePoolId); + boolean isBasicCreate = isBasicCreate(volumeInfo.getId()); + + if (!isBasicCreate) { + verifySufficientBytesForStoragePool(volumeInfo, storagePoolId); + verifySufficientIopsForStoragePool(volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId), storagePoolId); + } SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); long sfAccountId = getCreateSolidFireAccountId(sfConnection, volumeInfo.getAccountId(), storagePoolId); + SolidFireUtil.SolidFireVolume sfVolume; + + if (isBasicCreate) { + sfVolume = createSolidFireVolume(sfConnection, volumeInfo, sfAccountId); + + volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_CREATE); + + addBasicCreateInfoToVolumeDetails(volumeInfo.getId(), sfVolume); + + return sfVolume.getIqn(); + } + long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot"); long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate"); - SolidFireUtil.SolidFireVolume sfVolume; - if (csSnapshotId > 0) { // We are supposed to create a clone of the underlying volume or snapshot that supports the CloudStack snapshot. 
sfVolume = createClone(sfConnection, csSnapshotId, volumeInfo, sfAccountId, storagePoolId, DataObjectType.SNAPSHOT); @@ -1083,23 +1186,66 @@ private String createTemplateVolume(TemplateInfo templateInfo, long storagePoolI return iqn; } + private void performBasicDelete(SolidFireUtil.SolidFireConnection sfConnection, long volumeId) { + Long sfVolumeId = getBasicSfVolumeId(volumeId); + + Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null'."); + + String iqn = getBasicIqn(volumeId); + + Preconditions.checkNotNull(iqn, "'iqn' should not be 'null'."); + + VolumeVO volumeVO = volumeDao.findById(volumeId); + + SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(volumeVO.getFolder())); + + volumeVO.setFolder(String.valueOf(sfVolumeId)); + volumeVO.set_iScsiName(iqn); + + volumeDao.update(volumeId, volumeVO); + + volumeDetailsDao.removeDetail(volumeId, BASIC_SF_ID); + volumeDetailsDao.removeDetail(volumeId, BASIC_IQN); + volumeDetailsDao.removeDetail(volumeId, BASIC_DELETE); + } + + private void performBasicDeleteFailure(SolidFireUtil.SolidFireConnection sfConnection, long volumeId) { + Long sfVolumeId = getBasicSfVolumeId(volumeId); + + Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null'."); + + SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId); + + volumeDetailsDao.removeDetail(volumeId, BASIC_SF_ID); + volumeDetailsDao.removeDetail(volumeId, BASIC_IQN); + volumeDetailsDao.removeDetail(volumeId, BASIC_DELETE_FAILURE); + } + private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) { try { long volumeId = volumeInfo.getId(); SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao); - deleteSolidFireVolume(sfConnection, volumeInfo); + if (isBasicDelete(volumeId)) { + performBasicDelete(sfConnection, volumeId); + } + else if (isBasicDeleteFailure(volumeId)) { + performBasicDeleteFailure(sfConnection, volumeId); + } + else { + deleteSolidFireVolume(sfConnection, volumeInfo); - volumeDetailsDao.removeDetails(volumeId); + volumeDetailsDao.removeDetails(volumeId); - StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); - long usedBytes = getUsedBytes(storagePool, volumeId); + long usedBytes = getUsedBytes(storagePool, volumeId); - storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes); + storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes); - storagePoolDao.update(storagePoolId, storagePool); + storagePoolDao.update(storagePoolId, storagePool); + } } catch (Exception ex) { LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Failed to delete SolidFire volume. 
CloudStack volume ID: " + volumeInfo.getId(), ex); diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 60b44d74c288..82f8030515c6 100644 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -1205,12 +1205,15 @@ public Ternary, Integer>, List, Map iterator = allHosts.iterator(); iterator.hasNext();) { - final Host host = iterator.next(); + final StoragePool storagePool = _poolDao.findById(volume.getPoolId()); + final Long volClusterId = storagePool.getClusterId(); + + for (final Iterator iterator = allHosts.iterator(); iterator.hasNext();) { + final Host host = iterator.next(); + + if (volClusterId != null) { if (!host.getClusterId().equals(volClusterId) || usesLocal) { if (hasSuitablePoolsForVolume(volume, host, vmProfile)) { requiresStorageMotion.put(host, true); @@ -1219,8 +1222,16 @@ public Ternary, Integer>, List, Map, Integer>, List, Map 0: + sf_volumes = _get_not_active_sf_volumes_only(sf_volumes) + else: + sf_volumes = sf_client.list_deleted_volumes() + + return sf_volumes + +def _get_not_active_sf_volumes_only(sf_volumes): + not_active_sf_volumes_only = [] + + for sf_volume in sf_volumes: + if sf_volume["status"] != "active": + not_active_sf_volumes_only.append(sf_volume) + + return not_active_sf_volumes_only + +def get_active_sf_volumes(sf_client, sf_account_id=None): + if sf_account_id is not None: + sf_volumes = sf_client.list_volumes_for_account(sf_account_id) + + if sf_volumes is not None and len(sf_volumes) > 0: + sf_volumes = _get_active_sf_volumes_only(sf_volumes) + else: + sf_volumes = sf_client.list_active_volumes() + + return sf_volumes + +def _get_active_sf_volumes_only(sf_volumes): + active_sf_volumes_only = [] + + for sf_volume in sf_volumes: + if sf_volume["status"] == "active": + active_sf_volumes_only.append(sf_volume) + + return active_sf_volumes_only + +def check_and_get_sf_volume(sf_volumes, sf_volume_name, obj_assert, should_exist=True): + sf_volume = None + + for volume in sf_volumes: + if volume['name'] == sf_volume_name: + sf_volume = volume + break + + if should_exist: + obj_assert.assertNotEqual( + sf_volume, + None, + "Check if SF volume was created in correct account: " + str(sf_volumes) + ) + else: + obj_assert.assertEqual( + sf_volume, + None, + "Check if SF volume was deleted: " + str(sf_volumes) + ) + + return sf_volume + +def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True): + xen_sr = xen_session.xenapi.SR.get_by_name_label(xen_sr_name) + + if should_exist: + check_list(xen_sr, 1, obj_assert, "SR " + xen_sr_name + " doesn't exist, but should.") + + sr_shared = xen_session.xenapi.SR.get_shared(xen_sr[0]) + + obj_assert.assertEqual( + sr_shared, + True, + "SR " + xen_sr_name + " is not shared, but should be." + ) + else: + check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.") + +def check_vag(sf_volume, sf_vag_id, obj_assert): + obj_assert.assertEqual( + len(sf_volume['volumeAccessGroups']), + 1, + "The volume should only be in one VAG." + ) + + obj_assert.assertEqual( + sf_volume['volumeAccessGroups'][0], + sf_vag_id, + "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "." 
+ ) + +def get_vag_id(cs_api, cluster_id, primary_storage_id, obj_assert): + # Get SF Volume Access Group ID + sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id} + sf_vag_id_result = cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request) + sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId'] + + obj_assert.assertEqual( + isinstance(sf_vag_id, int), + True, + "The SolidFire VAG ID should be a non-zero integer." + ) + + return sf_vag_id + +def format_iqn(iqn): + return "/" + iqn + "/0" + +def check_size_and_iops(sf_volume, cs_volume, size, obj_assert): + obj_assert.assertEqual( + sf_volume['qos']['minIOPS'], + cs_volume.miniops, + "Check QoS - Min IOPS: " + str(sf_volume['qos']['minIOPS']) + ) + + obj_assert.assertEqual( + sf_volume['qos']['maxIOPS'], + cs_volume.maxiops, + "Check QoS - Max IOPS: " + str(sf_volume['qos']['maxIOPS']) + ) + + obj_assert.assertEqual( + sf_volume['totalSize'], + size, + "Check SolidFire volume size: " + str(sf_volume['totalSize']) + ) + +def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert): + # Get underlying SF volume size with hypervisor snapshot reserve + sf_volume_size_request = {'volumeid': cs_volume.id} + sf_volume_size_result = cs_api.getSolidFireVolumeSize(sf_volume_size_request) + sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize'] + + obj_assert.assertEqual( + isinstance(sf_volume_size, int), + True, + "The SolidFire volume size should be a non-zero integer." + ) + + return sf_volume_size From 0e5e60295311aa5994b77529da7cd7f943907977 Mon Sep 17 00:00:00 2001 From: Will Stevens Date: Fri, 17 Jun 2016 16:07:58 -0400 Subject: [PATCH 028/687] Added JSON diff output to the ApiXmlDocReader in additon TXT for parsability --- .../com/cloud/api/doc/ApiXmlDocReader.java | 74 +++++++++++++++++-- 1 file changed, 68 insertions(+), 6 deletions(-) diff --git a/server/src/com/cloud/api/doc/ApiXmlDocReader.java b/server/src/com/cloud/api/doc/ApiXmlDocReader.java index a9e49b2cf325..a8aa97bd584b 100644 --- a/server/src/com/cloud/api/doc/ApiXmlDocReader.java +++ b/server/src/com/cloud/api/doc/ApiXmlDocReader.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.Arrays; +import com.google.gson.Gson; import com.thoughtworks.xstream.XStream; import com.thoughtworks.xstream.io.xml.DomDriver; @@ -45,6 +46,8 @@ public static void main(String[] args) { ArrayList addedCommands = new ArrayList(); ArrayList removedCommands = new ArrayList(); HashMap stableCommands = new HashMap(); + HashMap jsonOut = new HashMap(); + Gson gson = new Gson(); XStream xs = new XStream(new DomDriver()); xs.alias("command", Command.class); @@ -108,49 +111,69 @@ public static void main(String[] args) { } try (FileWriter fstream = new FileWriter(dirName + "/diff.txt"); - BufferedWriter out = new BufferedWriter(fstream);){ + BufferedWriter out = new BufferedWriter(fstream); + FileWriter jfstream = new FileWriter(dirName + "/diff.json"); + BufferedWriter json = new BufferedWriter(jfstream);){ // Print added commands out.write("Added commands:\n"); + ArrayList> addedCmds = new ArrayList>(); for (Command c : addedCommands) { + HashMap addedCmd = new HashMap(); if (c.getDescription() != null && !c.getDescription().isEmpty()) { out.write("\n " + c.getName() + " (" + c.getDescription() + ")\n"); + addedCmd.put("description", c.getDescription()); } else { out.write("\n " + c.getName() + "\n"); } - + addedCmd.put("name", c.getName()); + addedCmds.add(addedCmd); } + 
jsonOut.put("commands_added", addedCmds); // Print removed commands out.write("\nRemoved commands:\n"); + ArrayList> removedCmds = new ArrayList>(); for (Command c : removedCommands) { + HashMap removedCmd = new HashMap(); if (c.getDescription() != null && !c.getDescription().isEmpty()) { out.write("\n\t" + c.getName() + " (" + c.getDescription() + ")\n"); + removedCmd.put("description", c.getDescription()); } else { out.write("\n\t" + c.getName() + "\n"); } - + removedCmd.put("name", c.getName()); + removedCmds.add(removedCmd); } + jsonOut.put("commands_removed", removedCmds); out.write("\nChanges in command type (sync versus async)\n"); + ArrayList> syncChangeCmds = new ArrayList>(); // Verify if the command was sync and became async and vice versa for (Map.Entryentry : stableCommands.entrySet()) { if (commands.get(entry.getKey()).isAsync() != oldCommands.get(entry.getKey()).isAsync()) { + HashMap syncChangeCmd = new HashMap(); String type = "Sync"; if (commands.get(entry.getKey()).isAsync()) { type = "Async"; } + syncChangeCmd.put("name", entry.getValue().getName()); + syncChangeCmd.put("sync_type", type); + syncChangeCmds.add(syncChangeCmd); out.write("\n\t" + entry.getValue().getName() + " became " + type); } } + jsonOut.put("commands_sync_changed", syncChangeCmds); // Print differences between commands arguments out.write("\n\nChanges in commands arguments:\n"); + ArrayList> argsChangeCmds = new ArrayList>(); for (String key : stableCommands.keySet()) { ArrayList newReqArgs = new ArrayList(); ArrayList removedReqArgs = new ArrayList(); HashMap stableReqArgs = new HashMap(); ArrayList newRespArgs = new ArrayList(); ArrayList removedRespArgs = new ArrayList(); + HashMap argsChangeCmd = new HashMap(); Command newCommand = commands.get(key); Command oldCommand = oldCommands.get(key); @@ -208,22 +231,30 @@ public static void main(String[] args) { commandInfo.append("\n\t" + key); out.write(commandInfo.toString()); out.write("\n"); + argsChangeCmd.put("name", key); // Request if (newReqArgs.size() != 0 || removedReqArgs.size() != 0 || stableReqArgs.size() != 0) { + HashMap requestChanges = new HashMap(); StringBuffer request = new StringBuffer(); request.append("\n\t\tRequest:\n"); out.write(request.toString()); if (newReqArgs.size() != 0) { StringBuffer newParameters = new StringBuffer(); newParameters.append("\n\t\t\tNew parameters: "); + ArrayList> newRequestParams = new ArrayList>(); for (Argument newArg : newReqArgs) { + HashMap newRequestParam = new HashMap(); String isRequiredParam = "optional"; if (newArg.isRequired()) { isRequiredParam = "required"; } + newRequestParam.put("name", newArg.getName()); + newRequestParam.put("required", newArg.isRequired()); + newRequestParams.add(newRequestParam); newParameters.append(newArg.getName() + " (" + isRequiredParam + "), "); } + requestChanges.put("params_new", newRequestParams); newParameters.delete(newParameters.length() - 2, newParameters.length() - 1); out.write(newParameters.toString()); out.write("\n"); @@ -231,9 +262,14 @@ public static void main(String[] args) { if (removedReqArgs.size() != 0) { StringBuffer removedParameters = new StringBuffer(); removedParameters.append("\n\t\t\tRemoved parameters: "); + ArrayList> removedRequestParams = new ArrayList>(); for (Argument removedArg : removedReqArgs) { + HashMap removedRequestParam = new HashMap(); + removedRequestParam.put("name", removedArg.getName()); + removedRequestParams.add(removedRequestParam); removedParameters.append(removedArg.getName() + ", "); } + 
requestChanges.put("params_removed", removedRequestParams); removedParameters.delete(removedParameters.length() - 2, removedParameters.length() - 1); out.write(removedParameters.toString()); out.write("\n"); @@ -242,52 +278,78 @@ public static void main(String[] args) { if (stableReqArgs.size() != 0) { StringBuffer changedParameters = new StringBuffer(); changedParameters.append("\n\t\t\tChanged parameters: "); + ArrayList> changedRequestParams = new ArrayList>(); for (Argument stableArg : stableReqArgs.values()) { + HashMap changedRequestParam = new HashMap(); String newRequired = "optional"; String oldRequired = "optional"; - if ((oldCommand.getReqArgByName(stableArg.getName()) != null) && (oldCommand.getReqArgByName(stableArg.getName()).isRequired() == true)) + changedRequestParam.put("required_old", false); + changedRequestParam.put("required_new", false); + if ((oldCommand.getReqArgByName(stableArg.getName()) != null) && (oldCommand.getReqArgByName(stableArg.getName()).isRequired() == true)) { oldRequired = "required"; - if ((newCommand.getReqArgByName(stableArg.getName()) != null) && (newCommand.getReqArgByName(stableArg.getName()).isRequired() == true)) + changedRequestParam.put("required_old", true); + } + if ((newCommand.getReqArgByName(stableArg.getName()) != null) && (newCommand.getReqArgByName(stableArg.getName()).isRequired() == true)) { newRequired = "required"; + changedRequestParam.put("required_new", true); + } + changedRequestParam.put("name", stableArg.getName()); + changedRequestParams.add(changedRequestParam); changedParameters.append(stableArg.getName() + " (old version - " + oldRequired + ", new version - " + newRequired + "), "); } + requestChanges.put("params_changed", changedRequestParams); changedParameters.delete(changedParameters.length() - 2, changedParameters.length() - 1); out.write(changedParameters.toString()); out.write("\n"); } + argsChangeCmd.put("request", requestChanges); } // Response if (newRespArgs.size() != 0 || removedRespArgs.size() != 0) { + HashMap responseChanges = new HashMap(); StringBuffer changedResponseParams = new StringBuffer(); changedResponseParams.append("\n\t\tResponse:\n"); out.write(changedResponseParams.toString()); if (newRespArgs.size() != 0) { + ArrayList> newResponseParams = new ArrayList>(); StringBuffer newRespParams = new StringBuffer(); newRespParams.append("\n\t\t\tNew parameters: "); for (Argument newArg : newRespArgs) { + HashMap newResponseParam = new HashMap(); + newResponseParam.put("name", newArg.getName()); + newResponseParams.add(newResponseParam); newRespParams.append(newArg.getName() + ", "); } + responseChanges.put("params_new", newResponseParams); newRespParams.delete(newRespParams.length() - 2, newRespParams.length() - 1); out.write(newRespParams.toString()); out.write("\n"); } if (removedRespArgs.size() != 0) { + ArrayList> removedResponseParams = new ArrayList>(); StringBuffer removedRespParams = new StringBuffer(); removedRespParams.append("\n\t\t\tRemoved parameters: "); for (Argument removedArg : removedRespArgs) { + HashMap removedResponseParam = new HashMap(); + removedResponseParam.put("name", removedArg.getName()); + removedResponseParams.add(removedResponseParam); removedRespParams.append(removedArg.getName() + ", "); } + responseChanges.put("params_removed", removedResponseParams); removedRespParams.delete(removedRespParams.length() - 2, removedRespParams.length() - 1); out.write(removedRespParams.toString()); out.write("\n"); } + argsChangeCmd.put("response", responseChanges); } + 
argsChangeCmds.add(argsChangeCmd); } } + jsonOut.put("commands_args_changed", argsChangeCmds); + json.write(gson.toJson(jsonOut)); } catch (IOException e) { e.printStackTrace(); } - } } From bb275a5ad161f0ee1d2930e8fc4708caf529bc4b Mon Sep 17 00:00:00 2001 From: nvazquez Date: Thu, 23 Jun 2016 13:13:20 -0300 Subject: [PATCH 029/687] CLOUDSTACK-9422: Granular VMware vms creation as full clones on HV --- .../storage/to/PrimaryDataStoreTO.java | 9 ++ .../com/cloud/capacity/CapacityManager.java | 10 ++ .../orchestration/VolumeOrchestrator.java | 37 +++++++ .../com/cloud/vm/UserVmCloneSettingVO.java | 4 + .../vm/dao/UserVmCloneSettingDaoImpl.java | 2 +- .../motion/AncientDataMotionStrategy.java | 33 +++++-- .../motion/AncientDataMotionStrategyTest.java | 99 +++++++++++++++++++ .../vmware/resource/VmwareResource.java | 62 ++++++++---- .../resource/VmwareStorageProcessor.java | 21 +++- .../VmwareStorageSubsystemCommandHandler.java | 35 ++++--- .../vmware/resource/VmwareResourceTest.java | 74 ++++++++++++-- .../cloud/capacity/CapacityManagerImpl.java | 2 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 20 ---- 13 files changed, 340 insertions(+), 68 deletions(-) create mode 100755 engine/storage/datamotion/test/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java diff --git a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index 9b711bc3b3a1..67ff0d71795b 100644 --- a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -51,6 +51,7 @@ public class PrimaryDataStoreTO implements DataStoreTO { private final String url; private Map details; private static final String pathSeparator = "/"; + private Boolean fullCloneFlag; public PrimaryDataStoreTO(PrimaryDataStore dataStore) { this.uuid = dataStore.getUuid(); @@ -144,4 +145,12 @@ public String toString() { .append("]") .toString(); } + + public Boolean isFullCloneFlag() { + return fullCloneFlag; + } + + public void setFullCloneFlag(Boolean fullCloneFlag) { + this.fullCloneFlag = fullCloneFlag; + } } diff --git a/engine/components-api/src/com/cloud/capacity/CapacityManager.java b/engine/components-api/src/com/cloud/capacity/CapacityManager.java index 3db6e5781520..d190d78a8a22 100644 --- a/engine/components-api/src/com/cloud/capacity/CapacityManager.java +++ b/engine/components-api/src/com/cloud/capacity/CapacityManager.java @@ -35,6 +35,7 @@ public interface CapacityManager { static final String StorageCapacityDisableThresholdCK = "pool.storage.capacity.disablethreshold"; static final String StorageOverprovisioningFactorCK = "storage.overprovisioning.factor"; static final String StorageAllocatedCapacityDisableThresholdCK = "pool.storage.allocated.capacity.disablethreshold"; + static final String VmwareCreateCloneFullCK = "vmware.create.full.clone"; static final ConfigKey CpuOverprovisioningFactor = new ConfigKey(Float.class, CpuOverprovisioningFactorCK, "Advanced", "1.0", "Used for CPU overprovisioning calculation; available CPU will be (actualCpuCapacity * cpu.overprovisioning.factor)", true, ConfigKey.Scope.Cluster, null); @@ -63,6 +64,15 @@ public interface CapacityManager { true, ConfigKey.Scope.Cluster, null); + static final ConfigKey VmwareCreateCloneFull = + new ConfigKey( + "Storage", + Boolean.class, + VmwareCreateCloneFullCK, + "false", + "If set to true, creates VMs as full clones on ESX hypervisor", + true, + ConfigKey.Scope.StoragePool); public 
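+ // (Illustrative comment, not part of the original patch.) Because the key is
+ // scoped to ConfigKey.Scope.StoragePool, callers read it per primary storage:
+ //   Boolean fullClone = CapacityManager.VmwareCreateCloneFull.valueIn(poolId);
+ // which is exactly how VolumeOrchestrator and AncientDataMotionStrategy
+ // consume it further down in this patch.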
boolean releaseVmCapacity(VirtualMachine vm, boolean moveFromReserved, boolean moveToReservered, Long hostId); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index ca4ef4fd2de5..32cb19b170be 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -69,6 +69,7 @@ import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.manager.allocator.PodAllocator; +import com.cloud.capacity.CapacityManager; import com.cloud.cluster.ClusterManager; import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenter; @@ -120,6 +121,7 @@ import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.DiskProfile; +import com.cloud.vm.UserVmCloneSettingVO; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; @@ -129,9 +131,15 @@ import com.cloud.vm.VmWorkMigrateVolume; import com.cloud.vm.VmWorkSerializer; import com.cloud.vm.VmWorkTakeVolumeSnapshot; +import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.UserVmDao; public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable { + + public enum UserVmCloneType { + full, linked + } + private static final Logger s_logger = Logger.getLogger(VolumeOrchestrator.class); @Inject @@ -178,6 +186,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati ClusterManager clusterManager; @Inject StorageManager storageMgr; + @Inject + protected UserVmCloneSettingDao _vmCloneSettingDao; private final StateMachine2 _volStateMachine; protected List _storagePoolAllocators; @@ -1353,6 +1363,33 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto disk.setDetails(getDetails(volumeInfo, dataStore)); vm.addDisk(disk); + + // If hypervisor is vSphere, check for clone type setting. + if (vm.getHypervisorType().equals(HypervisorType.VMware)) { + // retrieve clone flag. + UserVmCloneType cloneType = UserVmCloneType.linked; + Boolean value = CapacityManager.VmwareCreateCloneFull.valueIn(vol.getPoolId()); + if (value != null && value) { + cloneType = UserVmCloneType.full; + } + try { + UserVmCloneSettingVO cloneSettingVO = _vmCloneSettingDao.findByVmId(vm.getId()); + if (cloneSettingVO != null){ + if (! 
cloneSettingVO.getCloneType().equals(cloneType.toString())){ + cloneSettingVO.setCloneType(cloneType.toString()); + _vmCloneSettingDao.update(cloneSettingVO.getVmId(), cloneSettingVO); + } + } + else { + UserVmCloneSettingVO vmCloneSettingVO = new UserVmCloneSettingVO(vm.getId(), cloneType.toString()); + _vmCloneSettingDao.persist(vmCloneSettingVO); + } + } + catch (Throwable e){ + s_logger.debug("[NSX_PLUGIN_LOG] ERROR: " + e.getMessage()); + } + } + } } diff --git a/engine/schema/src/com/cloud/vm/UserVmCloneSettingVO.java b/engine/schema/src/com/cloud/vm/UserVmCloneSettingVO.java index 91b4918625d8..adca686db57c 100644 --- a/engine/schema/src/com/cloud/vm/UserVmCloneSettingVO.java +++ b/engine/schema/src/com/cloud/vm/UserVmCloneSettingVO.java @@ -46,4 +46,8 @@ public long getVmId() { public String getCloneType() { return this.cloneType; } + + public void setCloneType(String cloneType) { + this.cloneType = cloneType; + } } diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java index d76f6d46d4e3..0761f56917b3 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java @@ -45,7 +45,7 @@ public UserVmCloneSettingDaoImpl() { public void init() { // Initialize the search builders. vmIdSearch = createSearchBuilder(); - vmIdSearch.and("vmId", vmIdSearch.entity().getCloneType(), Op.EQ); + vmIdSearch.and("vmId", vmIdSearch.entity().getVmId(), Op.EQ); vmIdSearch.done(); cloneTypeSearch = createSearchBuilder(); diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 3124666db19e..57e4181aaf9e 100644 --- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -45,6 +45,7 @@ import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -56,8 +57,10 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.capacity.CapacityManager; import com.cloud.configuration.Config; import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePool; @@ -153,7 +156,7 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo srcForCopy = cacheData = cacheMgr.createCacheObject(srcData, destScope); } - CopyCommand cmd = new CopyCommand(srcForCopy.getTO(), destData.getTO(), _primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(srcForCopy.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), _primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); EndPoint ep = destHost != null ? 
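// (Illustrative comment, not part of the original patch: with an explicit
// destination host the command goes straight to that host's endpoint;
// otherwise the selector picks one, e.g. the SSVM -- hence the "host or
// ssvm is down" error below when no endpoint is found.)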
RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcForCopy, destData); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; @@ -201,6 +204,23 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo } } + /** + * Adds {@code 'vmware.create.full.clone'} value for a given primary storage, whose HV is VMware, on datastore's {@code fullCloneFlag} field + * @param dataTO Dest data store TO + * @return dataTO including fullCloneFlag, if provided + */ + protected DataTO addFullCloneFlagOnVMwareDest(DataTO dataTO) { + if (dataTO != null && dataTO.getHypervisorType().equals(Hypervisor.HypervisorType.VMware)){ + DataStoreTO dataStoreTO = dataTO.getDataStore(); + if (dataStoreTO != null && dataStoreTO instanceof PrimaryDataStoreTO){ + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) dataStoreTO; + Boolean value = CapacityManager.VmwareCreateCloneFull.valueIn(primaryDataStoreTO.getId()); + primaryDataStoreTO.setFullCloneFlag(value); + } + } + return dataTO; + } + protected Answer copyObject(DataObject srcData, DataObject destData) { return copyObject(srcData, destData, null); } @@ -257,7 +277,7 @@ protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) { ep = selector.select(srcData, volObj); } - CopyCommand cmd = new CopyCommand(srcData.getTO(), volObj.getTO(), _createVolumeFromSnapshotWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneFlagOnVMwareDest(volObj.getTO()), _createVolumeFromSnapshotWait, VirtualMachineManager.ExecuteInSequence.value()); Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; @@ -280,7 +300,7 @@ protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) { } protected Answer cloneVolume(DataObject template, DataObject volume) { - CopyCommand cmd = new CopyCommand(template.getTO(), volume.getTO(), 0, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(template.getTO(), addFullCloneFlagOnVMwareDest(volume.getTO()), 0, VirtualMachineManager.ExecuteInSequence.value()); try { EndPoint ep = selector.select(volume.getDataStore()); Answer answer = null; @@ -330,7 +350,7 @@ protected Answer copyVolumeBetweenPools(DataObject srcData, DataObject destData) objOnImageStore.processEvent(Event.CopyingRequested); - CopyCommand cmd = new CopyCommand(objOnImageStore.getTO(), destData.getTO(), _copyvolumewait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(objOnImageStore.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), _copyvolumewait, VirtualMachineManager.ExecuteInSequence.value()); EndPoint ep = selector.select(objOnImageStore, destData); if (ep == null) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; @@ -477,7 +497,7 @@ protected Answer createTemplateFromSnapshot(DataObject srcData, DataObject destD ep = selector.select(srcData, destData); } - CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _createprivatetemplatefromsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), _createprivatetemplatefromsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); Answer answer = null; if (ep == null) { String errMsg = "No remote endpoint to send 
command, check if host or ssvm is down?"; @@ -513,7 +533,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { Scope selectedScope = pickCacheScopeForCopy(srcData, destData); cacheData = cacheMgr.getCacheObject(srcData, selectedScope); - CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand cmd = new CopyCommand(srcData.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); cmd.setCacheTO(cacheData.getTO()); cmd.setOptions(options); EndPoint ep = selector.select(srcData, destData); @@ -525,6 +545,7 @@ protected Answer copySnapshot(DataObject srcData, DataObject destData) { answer = ep.sendMessage(cmd); } } else { + addFullCloneFlagOnVMwareDest(destData.getTO()); CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _backupsnapshotwait, VirtualMachineManager.ExecuteInSequence.value()); cmd.setOptions(options); EndPoint ep = selector.select(srcData, destData, StorageAction.BACKUPSNAPSHOT); diff --git a/engine/storage/datamotion/test/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java b/engine/storage/datamotion/test/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java new file mode 100755 index 000000000000..dccb6b445e56 --- /dev/null +++ b/engine/storage/datamotion/test/org/apache/cloudstack/storage/motion/AncientDataMotionStrategyTest.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.motion; + +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; + +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.any; + +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.cloud.agent.api.to.DataTO; +import com.cloud.capacity.CapacityManager; +import com.cloud.hypervisor.Hypervisor.HypervisorType; + +@RunWith(PowerMockRunner.class) +@PrepareForTest(CapacityManager.class) +public class AncientDataMotionStrategyTest { + + @Spy + @InjectMocks + private AncientDataMotionStrategy strategy = new AncientDataMotionStrategy(); + + @Mock + DataTO dataTO; + @Mock + PrimaryDataStoreTO dataStoreTO; + @Mock + ConfigKey vmwareKey; + + private static final long POOL_ID = 1l; + private static final Boolean FULL_CLONE_FLAG = true; + + @Before + public void setup() throws Exception { + MockitoAnnotations.initMocks(this); + + replaceVmwareCreateCloneFullField(); + + when(vmwareKey.valueIn(POOL_ID)).thenReturn(FULL_CLONE_FLAG); + + when(dataTO.getHypervisorType()).thenReturn(HypervisorType.VMware); + when(dataTO.getDataStore()).thenReturn(dataStoreTO); + when(dataStoreTO.getId()).thenReturn(POOL_ID); + } + + private void replaceVmwareCreateCloneFullField() throws Exception { + Field field = CapacityManager.class.getDeclaredField("VmwareCreateCloneFull"); + field.setAccessible(true); + // remove final modifier from field + Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(null, vmwareKey); + } + + @Test + public void testAddFullCloneFlagOnVMwareDest(){ + strategy.addFullCloneFlagOnVMwareDest(dataTO); + verify(dataStoreTO).setFullCloneFlag(FULL_CLONE_FLAG); + } + + @Test + public void testAddFullCloneFlagOnNotVmwareDest(){ + when(dataTO.getHypervisorType()).thenReturn(HypervisorType.Any); + verify(dataStoreTO, never()).setFullCloneFlag(any(Boolean.class)); + } + +} diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index a6db828f22fc..522b8ae04402 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -30,6 +30,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.Date; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -100,6 +101,7 @@ import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import 
org.apache.commons.lang.math.NumberUtils; @@ -266,6 +268,7 @@ import com.cloud.storage.resource.VmwareStorageLayoutHelper; import com.cloud.storage.resource.VmwareStorageProcessor; import com.cloud.storage.resource.VmwareStorageSubsystemCommandHandler; +import com.cloud.storage.resource.VmwareStorageProcessor.VmwareStorageProcessorConfigurableFields; import com.cloud.storage.template.TemplateProp; import com.cloud.utils.DateUtil; import com.cloud.utils.ExecutionResult; @@ -537,15 +540,54 @@ public Answer executeRequest(Command cmd) { protected void checkStorageProcessorAndHandlerNfsVersionAttribute(StorageSubSystemCommand cmd) { if (storageNfsVersion != null) return; if (cmd instanceof CopyCommand){ - examineStorageSubSystemCommandNfsVersion((CopyCommand) cmd); + EnumMap params = new EnumMap(VmwareStorageProcessorConfigurableFields.class); + examineStorageSubSystemCommandNfsVersion((CopyCommand) cmd, params); + params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand) cmd, params); + reconfigureProcessorByHandler(params); } } + /** + * Reconfigure processor by handler + * @param params params + */ + protected void reconfigureProcessorByHandler(EnumMap params) { + VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler) storageHandler; + boolean success = handler.reconfigureStorageProcessor(params); + if (success){ + s_logger.info("VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler successfully reconfigured"); + } else { + s_logger.error("Error while reconfiguring VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler, params=" + _gson.toJson(params)); + } + } + + /** + * Examine StorageSubSystem command to get full clone flag, if provided + * @param cmd command to execute + * @param params params + * @return copy of params including new values, if suitable + */ + protected EnumMap examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd, EnumMap params) { + EnumMap paramsCopy = new EnumMap(params); + HypervisorType hypervisor = cmd.getDestTO().getHypervisorType(); + if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)){ + DataStoreTO destDataStore = cmd.getDestTO().getDataStore(); + if (destDataStore instanceof PrimaryDataStoreTO){ + PrimaryDataStoreTO dest = (PrimaryDataStoreTO) destDataStore; + if (dest.isFullCloneFlag() != null){ + paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag().booleanValue()); + } + } + } + return paramsCopy; + } + /** * Examine StorageSubSystem command to get storage NFS version, if provided * @param cmd command to execute + * @param params params */ - protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd){ + protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap params){ DataStoreTO srcDataStore = cmd.getSrcTO().getDataStore(); boolean nfsVersionFound = false; @@ -554,7 +596,7 @@ protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd){ } if (nfsVersionFound){ - setCurrentNfsVersionInProcessorAndHandler(); + params.put(VmwareStorageProcessorConfigurableFields.NFS_VERSION, storageNfsVersion); } } @@ -571,20 +613,6 @@ protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO){ return false; } - /** - * Sets _storageNfsVersion into storage processor and storage handler by calling reconfigureNfsVersion on the storage handler, - * which will set NFS version into it and the storage processor. 
- */ - protected void setCurrentNfsVersionInProcessorAndHandler() { - VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler) storageHandler; - boolean success = handler.reconfigureNfsVersion(storageNfsVersion); - if (success){ - s_logger.info("NFS version " + storageNfsVersion + " successfully set in VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler"); - } else { - s_logger.error("Error while setting NFS version " + storageNfsVersion); - } - } - /** * Registers the vm to the inventory given the vmx file. */ diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index eb18e0b9a807..8ce65c42f60b 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -114,11 +114,25 @@ public class VmwareStorageProcessor implements StorageProcessor { + public enum VmwareStorageProcessorConfigurableFields { + NFS_VERSION("nfsVersion"), FULL_CLONE_FLAG("fullCloneFlag"); + + private String name; + + VmwareStorageProcessorConfigurableFields(String name){ + this.name = name; + } + + public String getName() { + return name; + } + } + private static final Logger s_logger = Logger.getLogger(VmwareStorageProcessor.class); private static final int DEFAULT_NFS_PORT = 2049; private final VmwareHostService hostService; - private final boolean _fullCloneFlag; + private boolean _fullCloneFlag; private final VmwareStorageMount mountService; private final VmwareResource resource; private final Integer _timeout; @@ -2394,4 +2408,9 @@ public void setNfsVersion(Integer nfsVersion){ this._nfsVersion = nfsVersion; s_logger.debug("VmwareProcessor instance now using NFS version: " + nfsVersion); } + + public void setFullCloneFlag(boolean value){ + this._fullCloneFlag = value; + s_logger.debug("VmwareProcessor instance - create full clone = " + (value ? 
"TRUE" : "FALSE")); + } } diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java index 7252f51bf397..fc199722b2bb 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageSubsystemCommandHandler.java @@ -19,6 +19,7 @@ package com.cloud.storage.resource; import java.io.File; +import java.util.EnumMap; import org.apache.log4j.Logger; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -37,6 +38,7 @@ import com.cloud.agent.api.to.SwiftTO; import com.cloud.hypervisor.vmware.manager.VmwareStorageManager; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.resource.VmwareStorageProcessor.VmwareStorageProcessorConfigurableFields; public class VmwareStorageSubsystemCommandHandler extends StorageSubsystemCommandHandlerBase { @@ -66,21 +68,25 @@ public VmwareStorageSubsystemCommandHandler(StorageProcessor processor, Integer this._nfsVersion = nfsVersion; } - /** - * Reconfigure NFS version for storage operations - * @param nfsVersion NFS version to set - * @return true if NFS version could be configured, false in other case - */ - public boolean reconfigureNfsVersion(Integer nfsVersion){ - try { - VmwareStorageProcessor processor = (VmwareStorageProcessor) this.processor; - processor.setNfsVersion(nfsVersion); - this._nfsVersion = nfsVersion; - return true; - } catch (Exception e){ - s_logger.error("Error while reconfiguring NFS version " + nfsVersion); - return false; + public boolean reconfigureStorageProcessor(EnumMap params) { + VmwareStorageProcessor processor = (VmwareStorageProcessor) this.processor; + for (VmwareStorageProcessorConfigurableFields key : params.keySet()){ + switch (key){ + case NFS_VERSION: + Integer nfsVersion = (Integer) params.get(key); + processor.setNfsVersion(nfsVersion); + this._nfsVersion = nfsVersion; + break; + case FULL_CLONE_FLAG: + boolean fullClone = (boolean) params.get(key); + processor.setFullCloneFlag(fullClone); + break; + default: + s_logger.error("Unknown reconfigurable field " + key.getName() + " for VmwareStorageProcessor"); + return false; + } } + return true; } @Override @@ -187,4 +193,5 @@ protected Answer execute(CopyCommand cmd) { return super.execute(cmd); } } + } diff --git a/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java index efaf6d28d8bf..3aac13234806 100644 --- a/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java +++ b/plugins/hypervisors/vmware/test/com/cloud/hypervisor/vmware/resource/VmwareResourceTest.java @@ -25,20 +25,24 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.never; import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.times; import java.util.ArrayList; import static org.powermock.api.mockito.PowerMockito.whenNew; import java.util.Arrays; +import java.util.EnumMap; import java.util.HashMap; import java.util.Map; import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InOrder; import org.mockito.InjectMocks; +import 
org.mockito.Matchers; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; @@ -60,11 +64,13 @@ import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.hypervisor.vmware.mo.DatacenterMO; +import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.storage.resource.VmwareStorageProcessor; import com.cloud.storage.resource.VmwareStorageSubsystemCommandHandler; +import com.cloud.storage.resource.VmwareStorageProcessor.VmwareStorageProcessorConfigurableFields; import com.cloud.utils.exception.CloudRuntimeException; @@ -125,13 +131,19 @@ public VmwareHypervisorHost getHyperHost(VmwareContext context, Command cmd) { ManagedObjectReference mor; @Mock DatacenterMO datacenter; + @Mock + DataTO destDataTO; + @Mock + PrimaryDataStoreTO destDataStoreTO; CopyCommand storageCmd; + EnumMap params = new EnumMap(VmwareStorageProcessorConfigurableFields.class); private static final Integer NFS_VERSION = Integer.valueOf(3); private static final Integer NFS_VERSION_NOT_PRESENT = null; private static final long VRAM_MEMORY_SIZE = 131072l; private static final long VIDEO_CARD_MEMORY_SIZE = 65536l; + private static final Boolean FULL_CLONE_FLAG = true; @Before public void setup() throws Exception { @@ -139,11 +151,17 @@ public void setup() throws Exception { storageCmd = PowerMockito.mock(CopyCommand.class); doReturn(context).when(_resource).getServiceContext(null); when(cmd.getVirtualMachine()).thenReturn(vmSpec); + when(storageCmd.getSrcTO()).thenReturn(srcDataTO); when(srcDataTO.getDataStore()).thenReturn(srcDataNfsTO); when(srcDataNfsTO.getNfsVersion()).thenReturn(NFS_VERSION); when(videoCard.getVideoRamSizeInKB()).thenReturn(VIDEO_CARD_MEMORY_SIZE); when(volume.getPath()).thenReturn(VOLUME_PATH); + + when(storageCmd.getDestTO()).thenReturn(destDataTO); + when(destDataTO.getHypervisorType()).thenReturn(HypervisorType.VMware); + when(destDataTO.getDataStore()).thenReturn(destDataStoreTO); + when(destDataStoreTO.isFullCloneFlag()).thenReturn(FULL_CLONE_FLAG); } //Test successful scaling up the vm @@ -239,8 +257,9 @@ public void testgetNfsVersionFromNfsTONfsVersion(){ @Test public void testSetCurrentNfsVersionInProcessorAndHandler(){ - _resource.setCurrentNfsVersionInProcessorAndHandler(); - verify(storageHandler).reconfigureNfsVersion(any(Integer.class)); + params.put(VmwareStorageProcessorConfigurableFields.NFS_VERSION, NFS_VERSION); + _resource.reconfigureProcessorByHandler(params); + verify(storageHandler).reconfigureStorageProcessor(params); } // --------------------------------------------------------------------------------------------------- @@ -248,30 +267,69 @@ public void testSetCurrentNfsVersionInProcessorAndHandler(){ @Test public void testExamineStorageSubSystemCommandNfsVersionNotPresent(){ when(srcDataNfsTO.getNfsVersion()).thenReturn(NFS_VERSION_NOT_PRESENT); - _resource.examineStorageSubSystemCommandNfsVersion(storageCmd); - verify(_resource, never()).setCurrentNfsVersionInProcessorAndHandler(); + _resource.examineStorageSubSystemCommandNfsVersion(storageCmd,params); + assertTrue(params.isEmpty()); } @Test public void testExamineStorageSubSystemCommandNfsVersion(){ - _resource.examineStorageSubSystemCommandNfsVersion(storageCmd); - verify(_resource).setCurrentNfsVersionInProcessorAndHandler(); + 
_resource.examineStorageSubSystemCommandNfsVersion(storageCmd, params); + assertEquals(1, params.size()); + assertEquals(NFS_VERSION, params.get(VmwareStorageProcessorConfigurableFields.NFS_VERSION)); + } + + // --------------------------------------------------------------------------------------------------- + + @Test + public void testExamineStorageSubSystemCommandFullCloneFlagForVmwareNullHypervisor(){ + when(destDataTO.getHypervisorType()).thenReturn(null); + _resource.examineStorageSubSystemCommandFullCloneFlagForVmware(storageCmd, params); + verify(destDataTO, never()).getDataStore(); + } + + @Test + public void testExamineStorageSubSystemCommandFullCloneFlagForHypervisorNotVmware(){ + when(destDataTO.getHypervisorType()).thenReturn(HypervisorType.XenServer); + _resource.examineStorageSubSystemCommandFullCloneFlagForVmware(storageCmd, params); + verify(destDataTO, never()).getDataStore(); + } + + @Test + public void testExamineStorageSubSystemCommandFullCloneFlagForVmware(){ + EnumMap params2 = _resource.examineStorageSubSystemCommandFullCloneFlagForVmware(storageCmd, params); + verify(destDataTO).getDataStore(); + verify(destDataStoreTO, times(2)).isFullCloneFlag(); + assertEquals(1, params2.size()); + assertEquals(FULL_CLONE_FLAG, params2.get(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG)); + } + + @Test + public void testExamineStorageSubSystemCommandFullCloneFlagForVmwareNull(){ + when(destDataStoreTO.isFullCloneFlag()).thenReturn(null); + _resource.examineStorageSubSystemCommandFullCloneFlagForVmware(storageCmd, params); + verify(destDataTO).getDataStore(); + verify(destDataStoreTO).isFullCloneFlag(); + assertTrue(params.isEmpty()); } // --------------------------------------------------------------------------------------------------- + @SuppressWarnings("unchecked") @Test public void checkStorageProcessorAndHandlerNfsVersionAttributeVersionNotSet(){ _resource.checkStorageProcessorAndHandlerNfsVersionAttribute(storageCmd); - verify(_resource).examineStorageSubSystemCommandNfsVersion(storageCmd); + verify(_resource).examineStorageSubSystemCommandNfsVersion(Matchers.eq(storageCmd), any(EnumMap.class)); + verify(_resource).examineStorageSubSystemCommandFullCloneFlagForVmware(Matchers.eq(storageCmd), any(EnumMap.class)); + verify(_resource).reconfigureProcessorByHandler(any(EnumMap.class)); assertEquals(NFS_VERSION, _resource.storageNfsVersion); } + @SuppressWarnings("unchecked") @Test public void checkStorageProcessorAndHandlerNfsVersionAttributeVersionSet(){ _resource.storageNfsVersion = NFS_VERSION; _resource.checkStorageProcessorAndHandlerNfsVersionAttribute(storageCmd); - verify(_resource, never()).examineStorageSubSystemCommandNfsVersion(storageCmd); + verify(_resource, never()).examineStorageSubSystemCommandNfsVersion(Matchers.eq(storageCmd), any(EnumMap.class)); } @Test(expected=CloudRuntimeException.class) diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java b/server/src/com/cloud/capacity/CapacityManagerImpl.java index d0ae3e99695a..a3d2c3f4babc 100644 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -1101,6 +1101,6 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { return new ConfigKey[] {CpuOverprovisioningFactor, MemOverprovisioningFactor, StorageCapacityDisableThreshold, StorageOverprovisioningFactor, - StorageAllocatedCapacityDisableThreshold, StorageOperationsExcludeCluster}; + StorageAllocatedCapacityDisableThreshold, 
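+ // (Illustrative comment, not part of the original patch: the keys returned
+ // from getConfigKeys() are what the config framework registers, so listing
+ // VmwareCreateCloneFull here is what exposes vmware.create.full.clone as an
+ // editable per-storage-pool setting.)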
StorageOperationsExcludeCluster, VmwareCreateCloneFull}; } } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index dd7e817aaeaa..5c82464c0a6d 100644 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -281,7 +281,6 @@ import com.cloud.vm.dao.InstanceGroupVMMapDao; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.SecondaryStorageVmDao; -import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; @@ -296,10 +295,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir // seconds - public enum UserVmCloneType { - full, linked - } - @Inject EntityManager _entityMgr; @Inject @@ -319,8 +314,6 @@ public enum UserVmCloneType { @Inject protected DomainDao _domainDao = null; @Inject - protected UserVmCloneSettingDao _vmCloneSettingDao = null; - @Inject protected UserVmDao _vmDao = null; @Inject protected UserVmJoinDao _vmJoinDao = null; @@ -3556,19 +3549,6 @@ public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCap vm.setDisplayVm(true); } - // If hypervisor is vSphere, check for clone type setting. - if (hypervisorType.equals(HypervisorType.VMware)) { - // retrieve clone flag. - UserVmCloneType cloneType = UserVmCloneType.linked; - String value = _configDao.getValue(Config.VmwareCreateFullClone.key()); - if (value != null) { - if (Boolean.parseBoolean(value) == true) - cloneType = UserVmCloneType.full; - } - UserVmCloneSettingVO vmCloneSettingVO = new UserVmCloneSettingVO(id, cloneType.toString()); - _vmCloneSettingDao.persist(vmCloneSettingVO); - } - long guestOSId = template.getGuestOSId(); GuestOSVO guestOS = _guestOSDao.findById(guestOSId); long guestOSCategoryId = guestOS.getCategoryId(); From a66ebbec61cc0574a48f828b0a36f1e82a520081 Mon Sep 17 00:00:00 2001 From: Milamber Date: Sat, 30 Jul 2016 09:04:23 +0100 Subject: [PATCH 030/687] Add the Transifex config for next version of CS (4.10) --- tools/transifex/.tx/config | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tools/transifex/.tx/config b/tools/transifex/.tx/config index fa4bc1f4d40f..4579d97aacbc 100644 --- a/tools/transifex/.tx/config +++ b/tools/transifex/.tx/config @@ -1,3 +1,4 @@ + [main] host = https://www.transifex.com @@ -127,3 +128,22 @@ trans.pt_BR = work-dir/messages_pt_BR.properties trans.ru_RU = work-dir/messages_ru_RU.properties trans.zh_CN = work-dir/messages_zh_CN.properties +[CloudStack_UI.410xmessagesproperties] +source_file = work-dir/messages.properties +source_lang = en +trans.ar = work-dir/messages_ar.properties +trans.ca = work-dir/messages_ca.properties +trans.de_DE = work-dir/messages_de_DE.properties +trans.es = work-dir/messages_es.properties +trans.fr_FR = work-dir/messages_fr_FR.properties +trans.hu = work-dir/messages_hu.properties +trans.it_IT = work-dir/messages_it_IT.properties +trans.ja_JP = work-dir/messages_ja_JP.properties +trans.ko_KR = work-dir/messages_ko_KR.properties +trans.nb_NO = work-dir/messages_nb_NO.properties +trans.nl_NL = work-dir/messages_nl_NL.properties +trans.pl = work-dir/messages_pl.properties +trans.pt_BR = work-dir/messages_pt_BR.properties +trans.ru_RU = work-dir/messages_ru_RU.properties +trans.zh_CN = work-dir/messages_zh_CN.properties + From c12108cd18e85462998813ca98043c505b4e9eaa Mon Sep 17 00:00:00 2001 From: Milamber Date: Sun, 25 Sep 2016 12:07:41 +0100 Subject: [PATCH 
031/687] Update L10N resource files with 4.10 strings from Transifex (20160925) --- .../resources/messages_de_DE.properties | 10 ++++ .../classes/resources/messages_es.properties | 50 +++++++++++++------ .../resources/messages_fr_FR.properties | 5 +- .../resources/messages_pt_BR.properties | 3 ++ 4 files changed, 50 insertions(+), 18 deletions(-) diff --git a/client/WEB-INF/classes/resources/messages_de_DE.properties b/client/WEB-INF/classes/resources/messages_de_DE.properties index 30d181155ba1..7147594d90a3 100644 --- a/client/WEB-INF/classes/resources/messages_de_DE.properties +++ b/client/WEB-INF/classes/resources/messages_de_DE.properties @@ -335,6 +335,7 @@ label.add.primary.storage=Hauptspeicher hinzuf\u00fcgen label.add.private.gateway=Privaten Gateway hinzuf\u00fcgen label.add.region=Region hinzuf\u00fcgen label.add.resources=Ressourcen hinzuf\u00fcgen +label.add.role=Rolle hinzuf\u00fcgen label.add.route=Route hinzuf\u00fcgen label.add.rule=Regel hinzuf\u00fcgen label.add.secondary.storage=Sekund\u00e4rspeicher hinzuf\u00fcgen @@ -574,6 +575,7 @@ label.delete.PA=Palo Alto l\u00f6schen label.delete.portable.ip.range=Portablen IP-Bereich l\u00f6schen label.delete.profile=Profil l\u00f6schen label.delete.project=Projekt l\u00f6schen +label.delete.role=Rolle l\u00f6schen label.delete.secondary.staging.store=Sekund\u00e4rer Staging Store l\u00f6schen label.delete.SRX=SRX l\u00f6schen label.delete.ucs.manager=UCS Manager l\u00f6schen @@ -658,6 +660,7 @@ label.edit.lb.rule=LB-Regel bearbeiten label.edit.network.details=Netzwerkdetails bearbeiten label.edit.project.details=Projektdetails bearbeiten label.edit.region=Region bearbeiten +label.edit.role=Rolle bearbeiten label.edit.rule=Regel bearbeiten label.edit.secondary.ips=Sekund\u00e4re IPs bearbeiten label.edit.tags=Schlagw\u00f6rter bearbeiten @@ -1161,6 +1164,8 @@ label.os.preference=OS Pr\u00e4ferenz label.os.type=OS Typ label.other=Andere label.outofbandmanagement.action=Aktion +label.outofbandmanagement.address=Adresse +label.outofbandmanagement.driver=Treiber label.outofbandmanagement.password=Passwort label.outofbandmanagement.port=Port label.outofbandmanagement.username=Benutzername @@ -1187,6 +1192,7 @@ label.password.reset.confirm=Passwort wurde zur\u00fcckgesetzt auf label.path=Pfad label.PA.threat.profile=Palo Alto Threat Profil label.perfect.forward.secrecy=Perfect Forward Secrecy +label.permission=Berechtigung label.persistent=Persistent label.physical.network.ID=Physikalisches Netzwerkkennung label.physical.network.name=Name des physischen Netzwerks @@ -1384,6 +1390,8 @@ label.retry.interval=Wiederversuchs-Interval label.review=Nachpr\u00fcfung label.revoke.project.invite=Einladung widerrufen label.role=Rolle +label.roles=Rollen +label.roletype=Rollentyp label.root.certificate=Root-Zertifikat label.root.disk.controller=Root-Festplatten-Controller label.root.disk.offering=Root-Festplattenangebot @@ -1392,6 +1400,7 @@ label.router.vm.scaled.up=Router VM hochskaliert label.routing.host=Routing Host label.routing=Routing label.rule.number=Regelnummer +label.rule=Regel label.rules=Regeln label.running.vms=Laufende VMs label.s3.access_key=Zugriffsschl\u00fcssel @@ -2110,6 +2119,7 @@ message.number.hosts=
\# of Hosts
message.number.pods=Anzahl der Pods
message.number.storage=\# von Hauptspeichervolumina
message.number.zones=\# of Zonen
+message.outofbandmanagement.action.maintenance=Warnung Host ist im Wartungsmodus message.password.has.been.reset.to=Passwort wurde zur\u00fcckgesetzt auf message.password.of.the.vm.has.been.reset.to=Passwort der VM wurde zur\u00fcckgesetzt auf message.pending.projects.1=Sie haben ausstehende Projekteinladungen\: diff --git a/client/WEB-INF/classes/resources/messages_es.properties b/client/WEB-INF/classes/resources/messages_es.properties index 81508456b497..10472564dbb2 100644 --- a/client/WEB-INF/classes/resources/messages_es.properties +++ b/client/WEB-INF/classes/resources/messages_es.properties @@ -335,6 +335,7 @@ label.add.primary.storage=A\u00f1adir Almacenamiento primario label.add.private.gateway=Agregar Gateway Privado label.add.region=Agregar Regi\u00f3n label.add.resources=Agregar Recursos +label.add.role=Agregar Rol label.add.route=Agregar ruta label.add.rule=Agregar regla label.add.secondary.storage=A\u00f1adir secundaria almacenamiento @@ -437,7 +438,7 @@ label.broadcasturi=broadcasturi label.broadcast.uri=URI de Broadcast label.broadcat.uri=URI de Broadcast label.brocade.vcs.address=Direcci\u00f3n del Switch VCS -label.brocade.vcs.details=Detalles del Switch VCS Brocade +label.brocade.vcs.details=Detalles del Switch VCS Brocade label.by.account=Por Cuenta label.by.alert.type=Por tipo de alerta label.by.availability=Por Disponibilidad @@ -574,6 +575,7 @@ label.delete.PA=Borrar Palo Alto label.delete.portable.ip.range=Borrar Rango IP Port\u00e1til label.delete.profile=Borrar Perfil label.delete.project=Eliminar proyecto +label.delete.role=Borrar Rol label.delete.secondary.staging.store=Borrar Almacenamiento Secundario Temporario label.delete.SRX=Borrar SRX label.delete.ucs.manager=Borrar UCS Manager @@ -641,7 +643,7 @@ label.domain.admin=Administrador de dominio label.domain.details=Detalles del Dominio label.domain=dominio label.domain.id=ID de dominio -label.domain.lower=dominio +label.domain.lower=dominio label.domain.name=Nombre de dominio label.domain.router=Router de Dominio label.domain.suffix=DNS sufijo de dominio (es decir, xyz.com) @@ -658,6 +660,7 @@ label.edit.lb.rule=Edite regla LB label.edit.network.details=Editar detalles de red label.edit.project.details=Editar detalles de proyecto label.edit.region=Editar Regi\u00f3n +label.edit.role=Editar Rol label.edit.rule=Editar regla label.edit.secondary.ips=Editar IPs secundarios label.edit.tags=Editar etiquetas @@ -1161,6 +1164,13 @@ label.os.preference=OS Preferencia label.os.type=tipo de Sistema Operativo label.other=Otro label.outofbandmanagement.action=Acci\u00f3n +label.outofbandmanagement.address=Direcci\u00f3n +label.outofbandmanagement.changepassword=Cambiar la contrase\u00f1a de la gesti\u00f3n Out-of-band +label.outofbandmanagement.configure=Configurar Gesti\u00f3n Out-of-band +label.outofbandmanagement.disable=Deshabilitar gesti\u00f3n Out-of-band +label.outofbandmanagement.driver=Driver +label.outofbandmanagement.enable=Habilitar gesti\u00f3n Out-of-band +label.outofbandmanagement=Gesti\u00f3n Out-of-Band label.outofbandmanagement.password=Contrase\u00f1a label.outofbandmanagement.port=Puerto label.outofbandmanagement.username=Nombre de usuario @@ -1187,6 +1197,7 @@ label.password.reset.confirm=La Contrase\u00f1a se ha cambiado a label.PA.threat.profile=Perf\u00edl de Amenazas Palo Alto label.path=Ruta label.perfect.forward.secrecy=Perfect Forward Secrecy +label.permission=Autorizaciones label.persistent=Persistente label.physical.network.ID=ID de red f\u00edsica label.physical.network.name=Nombre de red 
f\u00edsica @@ -1384,6 +1395,8 @@ label.retry.interval=Intervalo de Repetici\u00f3n label.review=Revisar label.revoke.project.invite=Cancelar Invitaci\u00f3n label.role=Papel +label.roles=Roles +label.roletype=Tipo de Rol label.root.certificate=Certificado Ra\u00edz label.root.disk.controller=Controladora de disco ROOT label.root.disk.offering=Root Disco Offering @@ -1392,6 +1405,7 @@ label.router.vm.scaled.up=Router VM Escaladas label.routing=Enrutamiento label.routing.host=Servidor de Routeo label.rule.number=N\u00famero de Regla +label.rule=Regla label.rules=Reglas label.running.vms=Ejecuci\u00f3n de m\u00e1quinas virtuales label.s3.access_key=Llave de Acceso @@ -1548,7 +1562,7 @@ label.sunday=domingo label.super.cidr.for.guest.networks=Super CIDR para las Redes Guest label.supported.services=Servicios Soportados label.supported.source.NAT.type=Tipo de Source NAT soportado -label.supportsstrechedl2subnet=Soporte de Subredes Streched L2 +label.supportsstrechedl2subnet=Soporte de Subredes Streched L2 label.suspend.project=Suspender Proyecto label.switch.type=Cambiar el tipo label.system.capacity=Capacidad de todo el sistema @@ -1906,10 +1920,10 @@ message.configuring.public.traffic=Configurando el tr\u00e1fico public message.configuring.storage.traffic=Configurando el tr\u00e1fico de almacenamiento message.confirm.action.force.reconnect=Por favor confirme que desea forzar la reconexi\u00f3n de este servidor message.confirm.add.vnmc.provider=Por favor confirme que desea agregar el proveedor VNMC. -message.confirm.archive.alert=Por favor confirme que desea archivar esta alerta. -message.confirm.archive.event=Por favor confirme que desea archivar este evento. -message.confirm.archive.selected.alerts=Por favor confirme que desea archivar las alertas seleccionadas -message.confirm.archive.selected.events=Por favor confirme que desea archivar los eventos seleccionados +message.confirm.archive.alert=Por favor confirme que desea archivar esta alerta. +message.confirm.archive.event=Por favor confirme que desea archivar este evento. +message.confirm.archive.selected.alerts=Por favor confirme que desea archivar las alertas seleccionadas +message.confirm.archive.selected.events=Por favor confirme que desea archivar los eventos seleccionados message.confirm.attach.disk=\u00bf Est\u00e1 seguro que desea conectar el disco? message.confirm.create.volume=\u00bfEst\u00e1 seguro que desea crear un volumen? message.confirm.current.guest.CIDR.unchanged=\u00bfDesea mantener el CIDR de la red guest actual sin cambios? @@ -1933,12 +1947,12 @@ message.confirm.delete.secondary.staging.store=Por favor confirme que desea borr message.confirm.delete.SRX=Por favor confirme que desa borrar este SRX message.confirm.delete.ucs.manager=Por favor confirme que desea borrar el UCS Manager message.confirm.destroy.router=Por favor confirme que desa borrar este router -message.confirm.disable.host=Por favor confirme que desea deshabitar este servidor +message.confirm.disable.host=Por favor confirme que desea deshabitar este servidor message.confirm.disable.network.offering=\u00bfEsta seguro que desea deshabilitar esta oferta de red? message.confirm.disable.provider=Por favor confirme que desea deshabitar este proveedor message.confirm.disable.vnmc.provider=Por favor confirme que desea deshabitar el proveedor VNMC. message.confirm.disable.vpc.offering=\u00bfEsta seguro que desea deshabitar esta oferta de VPC? 
-message.confirm.enable.host=Por favor confirme que desea habilitar este servidor +message.confirm.enable.host=Por favor confirme que desea habilitar este servidor message.confirm.enable.network.offering=\u00bfEsta seguro que desea habilitar esta oferta de red? message.confirm.enable.provider=Por favor confirme que desea habilitar este proveedor message.confirm.enable.vnmc.provider=Por favor confirme que desea habilitar el proveedor VNMC. @@ -1953,9 +1967,9 @@ message.confirm.release.dedicated.zone=\u00bfDesea liberar esta zona dedicada? message.confirm.release.dedicate.vlan.range=Por favor confirme que desea liberar este rango dedicado de VLAN message.confirm.remove.event=\u00bfEst\u00e1 seguro que desea remover este evento? message.confirm.remove.IP.range=Por favor confirme que desea remover este rango IP. -message.confirm.remove.load.balancer=Por favor confirme que desea remover esta VM del load balancer +message.confirm.remove.load.balancer=Por favor confirme que desea remover esta VM del load balancer message.confirm.remove.network.offering=\u00bfEsta seguro que desea remover esta oferta de red? -message.confirm.remove.selected.alerts=Por favor confirme que desea remover las alertas seleccionadas +message.confirm.remove.selected.alerts=Por favor confirme que desea remover las alertas seleccionadas message.confirm.remove.selected.events=Por favor confirme que desea borrar los eventos seleccionados message.confirm.remove.vmware.datacenter=Por favor confirme que desea remover el datacenter VMware message.confirm.remove.vpc.offering=\u00bfEsta seguro que desea remover esta oferta de VPC? @@ -1988,7 +2002,7 @@ message.decline.invitation=\u00bfEst\u00e1 segura que desea rechazar esta invita message.dedicated.zone.released=Dedicaci\u00f3n de zona liberada message.dedicate.zone=Dedicando zona message.delete.account=Por favor, confirme que desea eliminar esta cuenta. -message.delete.affinity.group=Por favor confirme que desea remover este grupo de afinidad. +message.delete.affinity.group=Por favor confirme que desea remover este grupo de afinidad. message.delete.gateway=Por favor confirme que usted quiere eliminar este gateway message.delete.project=Esta seguro que quiere eliminar este proyecto? message.delete.user=Por favor confirme que usted quiere eliminar este usuario @@ -1998,14 +2012,14 @@ message.delete.VPN.gateway=Por favor confirme que usted quiere eliminar este VPN message.desc.add.new.lb.sticky.rule=Agregar nueva regla Sticky al LB message.desc.advanced.zone=Para topologia de redes m\u00e1s sofisticadas. Este modelo de red provee la mayor flexibilidad al definir la redes guest y proveyendo ofertas de redes personalizadas tales como firewall, VPN, o soporte de load balancer. message.desc.basic.zone=Provee una red \u00fanica donde cada instancia de VM es asignada una IP directamente desde la red. El aislamiento de Guest puede proveerse por medio de layer-3 tales como los security groups (filtrado de direcciones IP por origen). -message.desc.cluster=Cada por debe contener uno o m\u00e1s clusters, ahora agregaremos el primero. Un cluster contiene un grupo de servidores. Los servidores en el cluster tiene el mismo hardware, ejecutan el mismo hipervisors, est\u00e1n en la misma subred, y utilizan el mismo almacenamiento compartido. Cada cluster consiste en uno o m\u00e1s servidores y uno o m\u00e1s servidores de almacenamiento primario. +message.desc.cluster=Cada por debe contener uno o m\u00e1s clusters, ahora agregaremos el primero. Un cluster contiene un grupo de servidores. 
Los servidores en el cluster tiene el mismo hardware, ejecutan el mismo hipervisors, est\u00e1n en la misma subred, y utilizan el mismo almacenamiento compartido. Cada cluster consiste en uno o m\u00e1s servidores y uno o m\u00e1s servidores de almacenamiento primario. message.desc.created.ssh.key.pair=Par de Claves SSH creadas. message.desc.create.ssh.key.pair=Por favor completar los siguientes datos para crear o registrar un par de claves ssh.
1. Si la clave p\u00fablica esta definida, CloudStack la registrar\u00e1. Uds puede usarla por medio de la clave privada.
2. Si la clave p\u00fablica no esta definida, CloudStack crear\u00e1 un nuevo Par de Claves SSH. En este caso, por favor copie y grab\u00e9 la clave privada. CloudStack no la almacenar\u00e1.
message.desc.host=Cada cluster debe contener por lo menos un host (servidor) para que la VMs Guest ejecuten all\u00ed, y se agregara el primero ahora. Para que un host funcione en CloudStack, se debe instalar el software del hypervisor, asignarle una IP al host en la red de Management y asegurarse que se conecte al servidores de management de CloudStack.
Indicar el nombre DNS del host o su direcci\u00f3n IP, el nombre del usuario (usualmente root), su contrase\u00f1a y las etiquetas necesarias para catalogar los hosts. message.desc.primary.storage=Cada cl\u00faster debe contener uno o m\u00e1s servidores primarios de almacenamiento, y ahora se agregara el primero. El almacenamiento primario contiene los volumenes de disco para todas las VMSs ejecutandose en los hosts del cl\u00faster. Utilice cualquier protocolo standard que soporte el hipervisor. message.desc.reset.ssh.key.pair=Pro favor especifique el par de claves ssh que desea agregar a esta VM. Tenga en cuenta que la clave de root ser\u00e1 cambiada al realizar esta operaci\u00f3n si la opci\u00f3n de contrase\u00f1a esta habilitada. message.desc.secondary.storage=Cada zona debe tener al menos un servidor de NFS secundario, y se agregar\u00e1 el primero ahora. El almacenamiento secundario guarda las plantillas de VM, las im\u00e1genes ISO, y las instantaneas de volumentes. Este server debe estar disponible a todos los hosts de la zona.
Complete con la direcci\u00f3n IP y el PATH exportado -message.desc.zone=Una zona es la unidad organizacional m\u00e1s grande en CloudStack, y t\u00edpicamente se corresponde con un datacenter en particular. Las Zonas proveen aislaci\u00f3n f\u00edsica y redundancia. Una zona consiste de uno o m\u00e1s pode (cada uno conteniendo servidores y almacenamiento primario), junto con el almacenamiento secundario que es compartido entre todos los pods en la zona +message.desc.zone=Una zona es la unidad organizacional m\u00e1s grande en CloudStack, y t\u00edpicamente se corresponde con un datacenter en particular. Las Zonas proveen aislaci\u00f3n f\u00edsica y redundancia. Una zona consiste de uno o m\u00e1s pode (cada uno conteniendo servidores y almacenamiento primario), junto con el almacenamiento secundario que es compartido entre todos los pods en la zona message.detach.disk=\u00bf Est\u00e1 seguro que desea desconectar este disco? message.detach.iso.confirm=Por favor, confirme que desea quitar el ISO de la instancia virtual message.disable.account=Por favor confirmar que desea deshabitar esta cuenta. Al hacerlo, todos los usuarios pertenecientes a ella dejaran de tener acceso a los recursos de cloud. Todas las m\u00e1quinas virtuales en ejecuci\u00f3n ser\u00e1n apagadas en forma inmediata. @@ -2110,6 +2124,10 @@ message.number.hosts=
\# de Anfitri\u00f3n
message.number.pods=\# de Las vainas
message.number.storage=\# de Almacenamiento primario
message.number.zones=\# de Zonas
+message.outofbandmanagement.changepassword=Cambiar contrase\u00f1a de gesti\u00f3n Out-of-band +message.outofbandmanagement.configure=Configurar Gesti\u00f3n Out-of-band +message.outofbandmanagement.disable=Deshabilitar gesti\u00f3n Out-of-band +message.outofbandmanagement.enable=Habilitar gesti\u00f3n Out-of-band message.password.has.been.reset.to=La Contrase\u00f1a se ha cambiado a message.password.of.the.vm.has.been.reset.to=La Contrase\u00f1a se ha cambiado a message.pending.projects.1=Tiene invitaciones a proyectos pendientes\: @@ -2200,9 +2218,9 @@ message.validate.fieldrequired=Este campo es obligatorio. message.validate.fixfield=Por favor corrija este campo. message.validate.instance.name=El nombre de la instancia no puede ser m\u00e1s largo que 63 caracteres. Solo se permiten letras ASCII tales como a~z, A~Z, n\u00fameros 0~9, los guiones est\u00e1n permitidos. Deben empezar con una letra y finalizar con una letra o un n\u00famero. message.validate.invalid.characters=Se han hallado caracteres no v\u00e1lidos. Por favor, corr\u00edjalos. -message.validate.maxlength=Por favor ingrese no m\u00e1s que {0} caracteres. +message.validate.maxlength=Por favor ingrese no m\u00e1s que {0} caracteres. message.validate.max=Por favor ingrese un valor menor o igual que {0}. -message.validate.minlength=Por favor ingrese al menos {0} caracteres. +message.validate.minlength=Por favor ingrese al menos {0} caracteres. message.validate.number=Por favor ingrese un n\u00famero v\u00e1lido. message.validate.range.length=Por favor ingrese un valor entre {0} y {1} caracteres de longitud. message.validate.range=Por favor ingrese un valor entre {0} y {1}. diff --git a/client/WEB-INF/classes/resources/messages_fr_FR.properties b/client/WEB-INF/classes/resources/messages_fr_FR.properties index 4dc90821821d..adfc0220c025 100644 --- a/client/WEB-INF/classes/resources/messages_fr_FR.properties +++ b/client/WEB-INF/classes/resources/messages_fr_FR.properties @@ -33,7 +33,7 @@ error.unable.to.reach.management.server=Impossible d\\'attendre le serveur de ge error.unresolved.internet.name=Votre nom Internet ne peut pas \u00eatre r\u00e9solu. force.delete.domain.warning=Attention \: Choisir cette option entra\u00eenera la suppression de tous les domaines issus et l\\'ensemble des comptes associ\u00e9s, ainsi que de leur ressources force.delete=Forcer la suppression -force.remove.host.warning=Attention \: Choisir cette option entra\u00eenera CloudStack \u00e0\u00a0forecer l\\'arr\u00eat de l\\'ensemble des machines virtuelles avant d\\'enlever l\\'h\u00f4te du cluster +force.remove.host.warning=Attention \: Choisir cette option entra\u00eenera CloudStack \u00e0\u00a0forcer l\\'arr\u00eat de l\\'ensemble des machines virtuelles avant d\\'enlever cet h\u00f4te du cluster force.remove=Suppression forc\u00e9e force.stop=Forcer l\\'arr\u00eat force.stop.instance.warning=Attention \: un arr\u00eat forc\u00e9 sur cette instance est la dernier option. Cela peut engendrer des pertes de donn\u00e9es et/ou un comportement inconsistant de votre instance. 
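The message.validate.* entries patched above carry positional parameters such as {0} and {1}, and non-ASCII characters are stored as \uXXXX escapes because java.util.Properties historically reads these files as ISO-8859-1. A minimal sketch of how a Java consumer could resolve and format one of these keys (the bundle base name "resources.messages" is inferred from the file paths in this diff; whether CloudStack's UI actually loads them through ResourceBundle is an assumption here, not something this patch shows):

    import java.text.MessageFormat;
    import java.util.Locale;
    import java.util.ResourceBundle;

    public class MessageLookupSketch {
        public static void main(String[] args) {
            // Assumption: client/WEB-INF/classes is on the classpath, so the
            // bundle base name is "resources.messages" and the Spanish file
            // messages_es.properties is selected for Locale("es").
            ResourceBundle bundle =
                    ResourceBundle.getBundle("resources.messages", new Locale("es"));

            // "Por favor ingrese no m\u00e1s que {0} caracteres." -- MessageFormat
            // substitutes the {0} placeholder with the supplied argument.
            String pattern = bundle.getString("message.validate.maxlength");
            System.out.println(MessageFormat.format(pattern, 63));
        }
    }

With the value shown in the hunk above, this would print "Por favor ingrese no m\u00e1s que 63 caracteres." once the escape is decoded by the properties loader.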
@@ -1165,7 +1165,7 @@ label.os.preference=Pr\u00e9f\u00e9rence OS label.os.type=Type du OS label.other=Autre label.outofbandmanagement.action=Action -label.outofbandmanagement.action.issue=Probl\u00e8me dans l\\'allumage via la gestion du flux administration +label.outofbandmanagement.action.issue=Probl\u00e8me dans l\\'allumage via la gestion du flux administration label.outofbandmanagement.address=Adresse label.outofbandmanagement.changepassword=Modifier le mot de passe du flux d\\'administration label.outofbandmanagement.configure=Configurer la gestion du flux d\\'administration @@ -1175,6 +1175,7 @@ label.outofbandmanagement.enable=Activer la gestion du flux d\\'administration label.outofbandmanagement=Gestion flux administration label.outofbandmanagement.password=Mot de passe label.outofbandmanagement.port=Port +label.outofbandmanagement.reenterpassword=Re-saisir Mot de passe label.outofbandmanagement.username=Identifiant label.override.guest.traffic=Remplacer Trafic-invit\u00e9 label.override.public.traffic=Remplacer Trafic-public diff --git a/client/WEB-INF/classes/resources/messages_pt_BR.properties b/client/WEB-INF/classes/resources/messages_pt_BR.properties index 2ad2760d2360..2a53ad70630c 100644 --- a/client/WEB-INF/classes/resources/messages_pt_BR.properties +++ b/client/WEB-INF/classes/resources/messages_pt_BR.properties @@ -1161,6 +1161,7 @@ label.os.preference=Prefer\u00eancia de SO label.os.type=Tipo de SO label.other=Outro label.outofbandmanagement.action=A\u00e7\u00e3o +label.outofbandmanagement.address=Endere\u00e7o label.outofbandmanagement.password=Senha label.outofbandmanagement.port=Porta label.outofbandmanagement.username=Nome de usu\u00e1rio @@ -1187,6 +1188,7 @@ label.password=Senha label.path=Caminho (Path) label.PA.threat.profile=Palo Alto Threat Profile label.perfect.forward.secrecy=Perfect Forward Secrecy +label.permission=Permiss\u00e3o label.persistent=Persistente label.physical.network.ID=ID da rede f\u00edsica label.physical.network.name=Nome da rede f\u00edsica @@ -1392,6 +1394,7 @@ label.router.vm.scaled.up=VM do Roteador Escalonada label.routing.host=Host de Roteamento label.routing=Roteamento label.rule.number=Regra N\u00famero +label.rule=Regra label.rules=Regras label.running.vms=VMs Rodando label.s3.access_key=Chave de acesso From 8f35e89999d4125ab1968dd258c40674c0709c06 Mon Sep 17 00:00:00 2001 From: nvazquez Date: Mon, 26 Sep 2016 09:08:31 -0700 Subject: [PATCH 032/687] CLOUDSTACK-9505: Fix test_deploy_vgpu_enabled tests cleanup --- .../smoke/test_deploy_vgpu_enabled_vm.py | 147 +++++++++--------- 1 file changed, 71 insertions(+), 76 deletions(-) diff --git a/test/integration/smoke/test_deploy_vgpu_enabled_vm.py b/test/integration/smoke/test_deploy_vgpu_enabled_vm.py index d49ab08d4849..065753da6fa4 100644 --- a/test/integration/smoke/test_deploy_vgpu_enabled_vm.py +++ b/test/integration/smoke/test_deploy_vgpu_enabled_vm.py @@ -48,7 +48,6 @@ def setUpClass(self): testClient = super(TestDeployvGPUenabledVM, self).getClsTestClient() self.apiclient = testClient.getApiClient() self.testdata = self.testClient.getParsedTestDataConfig() - self._cleanup = [] self.unsupportedHypervisor = False self.noSuitableHost = False # Need to add check whether zone containing the xen hypervisor or not @@ -85,89 +84,89 @@ def setUpClass(self): # No XenServer available with GPU Drivers installed self.noSuitableHost = True return - - self.domain = get_domain(self.apiclient) - self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) - # Creating Account - 
self.account = Account.create( - self.apiclient, - self.testdata["account"], - domainid=self.domain.id - ) - self._cleanup.append(self.account) - + def setUp(self): self.testdata = self.testClient.getParsedTestDataConfig()["vgpu"] self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() + self.dbclient = self.testClient.getDbConnection() if self.noSuitableHost or self.unsupportedHypervisor: self.hypervisor = get_hypervisor_type(self.apiclient) if self.hypervisor.lower() not in ["vmware"]: self.skipTest("Skipping test because suitable hypervisor/host not\ present") self.testdata = self.testClient.getParsedTestDataConfig() + + self.cleanup = [] # Get Zone, Domain and Default Built-in template self.domain = get_domain(self.apiclient) - self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) - - # Before running this test for Xen Server, register a windows template with ostype as - # 'Windows 7 (32-bit)' - self.template = get_template( - self.apiclient, - self.zone.id, - self.testdata["ostype"]) - - # create a user account + self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests()) + # Creating Account self.account = Account.create( - self.apiclient, - self.testdata["account"], - domainid=self.domain.id + self.apiclient, + self.testdata["account"], + domainid=self.domain.id ) - self.cleanup = [] - - if self.hypervisor.lower() in ["xenserver"]: - self.testdata["mode"] = self.zone.networktype - if self.template == FAILED: + if self.hypervisor.lower() in ["xenserver"]: + + # Before running this test for Xen Server, register a windows template with ostype as + # 'Windows 7 (32-bit)' + self.template = get_template( + self.apiclient, + self.zone.id, + self.testdata["ostype"]) + self.cleanup.append(self.template) + + self.testdata["mode"] = self.zone.networktype + + if self.template == FAILED: assert False, "get_template() failed to return template with description %s" % self.testdata[ "ostype"] - self.testdata["small"]["zoneid"] = self.zone.id - self.testdata["small"]["template"] = self.template.id + self.testdata["small"]["zoneid"] = self.zone.id + self.testdata["small"]["template"] = self.template.id - self.testdata["service_offerings"]["vgpu260qwin"]["serviceofferingdetails"] = [ + self.testdata["service_offerings"]["vgpu260qwin"]["serviceofferingdetails"] = [ { 'pciDevice': 'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}, { 'vgpuType': 'GRID K120Q'}] # create a service offering - self.service_offering = ServiceOffering.create( + self.service_offering = ServiceOffering.create( self.apiclient, self.testdata["service_offerings"]["vgpu260qwin"], ) - # build cleanup list - self.cleanup = [ - self.service_offering, - self.account - ] - elif self.hypervisor.lower() in ["vmware"]: - self.testdata["isolated_network"]["zoneid"] = self.zone.id - self.userapiclient = self.testClient.getUserApiClient( + self.cleanup.append(self.service_offering) + + elif self.hypervisor.lower() in ["vmware"]: + self.testdata["isolated_network"]["zoneid"] = self.zone.id + + self.userapiclient = self.testClient.getUserApiClient( UserName=self.account.name, DomainName=self.account.domain ) - self.service_offering = ServiceOffering.create( + self.service_offering = ServiceOffering.create( self.apiclient, self.testdata["service_offering"]) - self.cleanup.append(self.service_offering) - # Create Shared Network Offering - self.isolated_network_offering = NetworkOffering.create( + # Create Shared Network Offering + self.isolated_network_offering = 
NetworkOffering.create( self.apiclient, self.testdata["isolated_network_offering"]) - self.cleanup.append(self.isolated_network_offering) # Enable Isolated Network offering - self.isolated_network_offering.update(self.apiclient, state='Enabled') + self.isolated_network_offering.update(self.apiclient, state='Enabled') + + # Register a private template in the account with nic adapter vmxnet3 + # Also add required 3D GPU details for enabling it + self.template = Template.register( + self.userapiclient, + self.testdata["configurableData"]["vmxnet3template"], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid, + details=[{"mks.enable3d" : "true", "mks.use3dRenderer" : "automatic", + "svga.autodetect" : "false", "svga.vramSize" : "131072"}] + ) @attr(tags=['advanced', 'basic', 'vgpu'], required_hardware="true") @@ -179,10 +178,11 @@ def test_deploy_vgpu_enabled_vm(self): # 2. Virtual Machine is vGPU enabled (via SSH) # 3. listVirtualMachines returns accurate information """ - - if self.hypervisor.lower() not in ["xenserver"]: + if self.hypervisor.lower() not in ["xenserver"]: + self.cleanup.append(self.account) self.skipTest("This test case is written specifically\ for XenServer hypervisor") + self.virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["small"], @@ -191,6 +191,7 @@ def test_deploy_vgpu_enabled_vm(self): serviceofferingid=self.service_offering.id, mode=self.testdata['mode'] ) + self.cleanup.append(self.virtual_machine) list_vms = VirtualMachine.list( self.apiclient, @@ -240,11 +241,11 @@ def test_deploy_vgpu_enabled_vm(self): user=self.testdata['configurableData']['host']["username"], passwd=self.testdata['configurableData']['host']["password"]) res = sshClient.execute( - "xe vgpu-list vm-name-label=%s params=type-uuid %s" % + "xe vgpu-list vm-name-label=%s params=type-uuid %s" % (vm.instancename)) self.debug("SSH result: %s" % res) except Exception as e: - self.fail("SSH Access failed for %s: %s" % + self.fail("SSH Access failed for %s: %s" % (hostip, e) ) result = str(res) @@ -253,16 +254,18 @@ def test_deploy_vgpu_enabled_vm(self): 1, "VM is vGPU enabled." ) + self.cleanup.append(self.account) def tearDown(self): try: cleanup_resources(self.apiclient, self.cleanup) except Exception as e: self.debug("Warning! Exception in tearDown: %s" % e) + return @attr(tags=["advanced"]) def test_3d_gpu_support(self): - """ + """Test 3D GPU support # 1. Register a template for VMware with nicAdapter vmxnet3 and 3D GPU details # 2. Deploy a VM using this template @@ -270,29 +273,17 @@ def test_3d_gpu_support(self): # 4. Add network to VM # 5. 
Verify vm details for 3D GPU details """ - if self.hypervisor.lower() not in ["vmware"]: + self.cleanup.append(self.account) self.skipTest("This test case is written specifically\ for Vmware hypervisor") - # Register a private template in the account with nic adapter vmxnet3 - # Also add required 3D GPU details for enabling it - template = Template.register( - self.userapiclient, - self.testdata["configurableData"]["vmxnet3template"], - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid, - details=[{"mks.enable3d" : "true", "mks.use3dRenderer" : "automatic", - "svga.autodetect" : "false", "svga.vramSize" : "131072"}] - ) - self.cleanup.append(template) - template.download(self.apiclient) + self.template.download(self.apiclient) templates = Template.list( self.userapiclient, listall=True, - id=template.id, + id=self.template.id, templatefilter="self" ) @@ -303,28 +294,30 @@ def test_3d_gpu_support(self): ) self.testdata["virtual_machine"]["zoneid"] = self.zone.id - self.testdata["virtual_machine"]["template"] = template.id + self.testdata["virtual_machine"]["template"] = self.template.id - virtual_machine = VirtualMachine.create( + self.virtual_machine = VirtualMachine.create( self.apiclient, self.testdata["virtual_machine"], accountid=self.account.name, domainid=self.account.domainid, - templateid=template.id, + templateid=self.template.id, serviceofferingid=self.service_offering.id) - isolated_network = Network.create( + self.isolated_network = Network.create( self.apiclient, self.testdata["isolated_network"], self.account.name, self.account.domainid, networkofferingid=self.isolated_network_offering.id) - virtual_machine.add_nic(self.apiclient, isolated_network.id) + self.virtual_machine.add_nic(self.apiclient, self.isolated_network.id) + + self.cleanup = [self.virtual_machine, self.isolated_network, self.isolated_network_offering, self.service_offering, self.account, self.template] - qresultset = self.dbclient.execute("select id from vm_instance where uuid = '%s';" % virtual_machine.id) + qresultset = self.dbclient.execute("select id from vm_instance where uuid = '%s';" % self.virtual_machine.id) vm_id = qresultset[0] - qresultset = self.dbclient.execute("select name, value from user_vm_details where vm_id = '%d';" % vm_id) + qresultset = self.dbclient.execute("select name, value from user_vm_details where vm_id = '%d';" % vm_id) detailKeys = [x[0] for x in qresultset] self.assertTrue('mks.enable3d' in detailKeys and 'mks.use3dRenderer' in detailKeys and 'svga.autodetect' in detailKeys and 'svga.vramSize' in detailKeys, "VM details do not contain 3D GPU details") @@ -336,3 +329,5 @@ def test_3d_gpu_support(self): self.assertEquals('false', qresultset[detailKeys.index('svga.autodetect')][1], "Expected detail 'svga.autodetect'='false'") self.assertEquals('131072', qresultset[detailKeys.index('svga.vramSize')][1], "Expected detail 'svga.vramSize'='131072'") + + return From d4ae1ab6b642270cfd0bfb7d0c550b4486bef7f1 Mon Sep 17 00:00:00 2001 From: Mike Tutkowski Date: Fri, 23 Sep 2016 22:47:04 -0700 Subject: [PATCH 033/687] Switched to the official SolidFire SDK for Python --- requirements.txt | 3 + .../plugins/solidfire/TestAddRemoveHosts.py | 27 +- .../plugins/solidfire/TestManagedSystemVMs.py | 588 ++++++++++++++++++ .../plugins/solidfire/TestSnapshots.py | 201 +++--- .../solidfire/TestVMMigrationWithStorage.py | 54 +- .../plugins/solidfire/TestVMSnapshots.py | 18 +- .../plugins/solidfire/TestVolumes.py | 41 +- .../plugins/solidfire/util/sf_util.py | 42 +- 8 
files changed, 787 insertions(+), 187 deletions(-) create mode 100644 test/integration/plugins/solidfire/TestManagedSystemVMs.py diff --git a/requirements.txt b/requirements.txt index f5c766623112..4485f5d73f96 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,3 +19,6 @@ cloudmonkey # Marvin dependencies are installed via its bundle + +# Install the SolidFire SDK for Python +solidfire-sdk-python \ No newline at end of file diff --git a/test/integration/plugins/solidfire/TestAddRemoveHosts.py b/test/integration/plugins/solidfire/TestAddRemoveHosts.py index a13c61a8e7c5..4c3d261f6e15 100644 --- a/test/integration/plugins/solidfire/TestAddRemoveHosts.py +++ b/test/integration/plugins/solidfire/TestAddRemoveHosts.py @@ -21,6 +21,8 @@ import time import XenAPI +from solidfire.factory import ElementFactory + from util import sf_util # All tests inherit from cloudstackTestCase @@ -37,8 +39,6 @@ # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources -from solidfire import solidfire_element_api as sf_api - # Prerequisites: # Only one zone # Only one pod @@ -59,12 +59,10 @@ class TestData: diskSize = "disksize" domainId = "domainId" hypervisor = "hypervisor" - login = "login" mvip = "mvip" name = "name" newHost = "newHost" newHostDisplayName = "newHostDisplayName" - osType = "ostype" password = "password" podId = "podid" port = "port" @@ -89,7 +87,7 @@ def __init__(self): self.testdata = { TestData.solidFire: { TestData.mvip: "192.168.139.112", - TestData.login: "admin", + TestData.username: "admin", TestData.password: "admin", TestData.port: 443, TestData.url: "https://192.168.139.112:443" @@ -170,7 +168,6 @@ def __init__(self): "diskname": "testvolume2", }, TestData.newHostDisplayName: "XenServer-6.5-3", - TestData.osType: "CentOS 5.6(64-bit) no GUI (XenServer)", TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, @@ -186,7 +183,9 @@ class TestAddRemoveHosts(cloudstackTestCase): def setUpClass(cls): # Set up API client testclient = super(TestAddRemoveHosts, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() cls.dbConnection = testclient.getDbConnection() cls.testdata = TestData().testdata @@ -203,12 +202,14 @@ def setUpClass(cls): cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) # Set up SolidFire connection - cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, cls.testdata[TestData.osType]) + cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -249,7 +250,7 @@ def tearDownClass(cls): try: cleanup_resources(cls.apiClient, cls._cleanup) - sf_util.purge_solidfire_volumes(cls.sf_client) + sf_util.purge_solidfire_volumes(cls.sfe) except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) @@ -423,8 +424,8 @@ def test_add_remove_host_with_solidfire_plugin_4(self): self._perform_add_remove_host(primary_storage_2.id, sf_iscsi_name) 
- def _perform_add_remove_host(self, primary_storage_id, sf_iscsi_name): - xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sf_iscsi_name)[0] + def _perform_add_remove_host(self, primary_storage_id, sr_name): + xen_sr = self.xen_session.xenapi.SR.get_by_name_label(sr_name)[0] pbds = self.xen_session.xenapi.SR.get_PBDs(xen_sr) @@ -651,10 +652,10 @@ def _get_sf_vag_id(self, cluster_id, primary_storage_id): return sf_vag_id def _get_sf_vag(self, sf_vag_id): - return self.sf_client.list_volume_access_groups(sf_vag_id, 1)["volumeAccessGroups"][0] + return self.sfe.list_volume_access_groups(sf_vag_id, 1).volume_access_groups[0] def _get_sf_vag_initiators(self, sf_vag): - return sf_vag["initiators"] + return sf_vag.initiators def _verifyVag(self, host_iscsi_iqns, sf_vag_initiators): self.assertEqual( diff --git a/test/integration/plugins/solidfire/TestManagedSystemVMs.py b/test/integration/plugins/solidfire/TestManagedSystemVMs.py new file mode 100644 index 000000000000..994266c20339 --- /dev/null +++ b/test/integration/plugins/solidfire/TestManagedSystemVMs.py @@ -0,0 +1,588 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import logging +import random +import SignedAPICall +import XenAPI + +from solidfire.factory import ElementFactory + +from util import sf_util + +from marvin.cloudstackAPI import destroySystemVm + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +from nose.plugins.attrib import attr + +# Import Integration Libraries + +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, Router, ServiceOffering, StoragePool, User, VirtualMachine, Zone + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_ssvms, list_routers + +# utils - utility classes for common cleanup, external library wrappers, etc. +from marvin.lib.utils import cleanup_resources, wait_until + +# Prerequisites: +# * Only use one SolidFire cluster for the two primary storages based on the "SolidFire" storage plug-in. +# * Do not run other workloads on the SolidFire cluster while running this test as this test checks at a certain +# point to make sure no active SolidFire volumes exist. 
+# * Only one zone +# * Only one secondary storage VM and one console proxy VM running on NFS (no virtual router or user VMs exist) +# * Only one pod +# * Only one cluster +# * Set storage.cleanup.enabled to true +# * Set storage.cleanup.interval to 150 +# * Set storage.cleanup.delay to 60 + + +class TestData(): + account = "account" + capacityBytes = "capacitybytes" + capacityIops = "capacityiops" + clusterId = "clusterid" + computeOffering = "computeoffering" + diskOffering = "diskoffering" + domainId = "domainid" + email = "email" + firstname = "firstname" + hypervisor = "hypervisor" + lastname = "lastname" + max_iops = "maxiops" + min_iops = "miniops" + mvip = "mvip" + name = "name" + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + solidFire = "solidfire" + storageTag = "SolidFire_SAN_1" + systemOffering = "systemoffering" + systemOfferingFailure = "systemofferingFailure" + tags = "tags" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + xenServer = "xenserver" + zoneId = "zoneid" + + def __init__(self): + self.testdata = { + TestData.solidFire: { + TestData.mvip: "192.168.139.112", + TestData.username: "admin", + TestData.password: "admin", + TestData.port: 443, + TestData.url: "https://192.168.139.112:443" + }, + TestData.xenServer: { + TestData.username: "root", + TestData.password: "solidfire" + }, + TestData.account: { + TestData.email: "test@test.com", + TestData.firstname: "John", + TestData.lastname: "Doe", + TestData.username: "test", + TestData.password: "test" + }, + TestData.user: { + TestData.email: "user@test.com", + TestData.firstname: "Jane", + TestData.lastname: "Doe", + TestData.username: "testuser", + TestData.password: "password" + }, + TestData.primaryStorage: { + TestData.name: TestData.get_name_for_solidfire_storage(), + TestData.scope: "ZONE", + TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" + + "clusterAdminUsername=admin;clusterAdminPassword=admin;" + + "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" + + "clusterDefaultBurstIopsPercentOfMaxIops=1.5;", + TestData.provider: "SolidFire", + TestData.tags: TestData.storageTag, + TestData.capacityIops: 4500000, + TestData.capacityBytes: 2251799813685248, + TestData.hypervisor: "Any", + TestData.zoneId: 1 + }, + TestData.virtualMachine: { + TestData.name: "TestVM", + "displayname": "Test VM" + }, + TestData.computeOffering: { + TestData.name: "SF_CO_1", + "displaytext": "SF_CO_1 (Min IOPS = 10,000; Max IOPS = 15,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": False, + TestData.min_iops: 10000, + TestData.max_iops: 15000, + "hypervisorsnapshotreserve": 200, + TestData.tags: TestData.storageTag + }, + TestData.systemOffering: { + TestData.name: "SF_SO_1", + "displaytext": "Managed SO (Min IOPS = 4,000; Max IOPS = 8,000)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + TestData.min_iops: 4000, + TestData.max_iops: 8000, + TestData.tags: TestData.storageTag, + "issystem": True + }, + TestData.systemOfferingFailure: { + TestData.name: "SF_SO_2", + "displaytext": "Managed SO (Customized IOPS)", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 128, + "storagetype": "shared", + "customizediops": True, + TestData.tags: TestData.storageTag, + "issystem": True + }, + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "192.168.129.50" + } + + @staticmethod + def 
get_name_for_solidfire_storage(): + return "SolidFire-%d" % random.randint(0, 100) + + +class TestManagedSystemVMs(cloudstackTestCase): + _unique_name_suffix = "-Temp" + + _secondary_storage_unique_name = "Cloud.com-SecondaryStorage" + _secondary_storage_temp_unique_name = _secondary_storage_unique_name + _unique_name_suffix + + _console_proxy_unique_name = "Cloud.com-ConsoleProxy" + _console_proxy_temp_unique_name = _console_proxy_unique_name + _unique_name_suffix + + _virtual_router_unique_name = "Cloud.com-SoftwareRouter" + _virtual_router_temp_unique_name = _virtual_router_unique_name + _unique_name_suffix + + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestManagedSystemVMs, cls).getClsTestClient() + + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() + cls.dbConnection = testclient.getDbConnection() + + cls.testdata = TestData().testdata + + # Set up xenAPI connection + host_ip = "https://" + \ + list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress + + # Set up XenAPI connection + cls.xen_session = XenAPI.Session(host_ip) + + xenserver = cls.testdata[TestData.xenServer] + + cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) + + # Set up SolidFire connection + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) + + # Get Resources from Cloud Infrastructure + cls.zone = Zone(get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]).__dict__) + cls.cluster = list_clusters(cls.apiClient)[0] + cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata["account"], + admin=1 + ) + + # Set up connection to make customized API calls + cls.user = User.create( + cls.apiClient, + cls.testdata["user"], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + api_url = "http://" + url + ":8080/client/api" + userkeys = User.registerUserKeys(cls.apiClient, cls.user.id) + + cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey) + + cls.compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + systemoffering = cls.testdata[TestData.systemOffering] + + systemoffering[TestData.name] = "Managed SSVM" + systemoffering['systemvmtype'] = "secondarystoragevm" + + cls.secondary_storage_offering = ServiceOffering.create( + cls.apiClient, + systemoffering + ) + + systemoffering[TestData.name] = "Managed CPVM" + systemoffering['systemvmtype'] = "consoleproxy" + + cls.console_proxy_offering = ServiceOffering.create( + cls.apiClient, + systemoffering + ) + + systemoffering[TestData.name] = "Managed VR" + systemoffering['systemvmtype'] = "domainrouter" + + cls.virtual_router_offering = ServiceOffering.create( + cls.apiClient, + systemoffering + ) + + # Resources that are to be destroyed + cls._cleanup = [ + cls.secondary_storage_offering, + cls.console_proxy_offering, + cls.virtual_router_offering, + cls.compute_offering, + cls.user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + except Exception as e: + logging.debug("Exception in 
tearDownClass(cls): %s" % e) + + def setUp(self): + self.cleanup = [] + + def tearDown(self): + try: + cleanup_resources(self.apiClient, self.cleanup) + + sf_util.purge_solidfire_volumes(self.sfe) + except Exception as e: + logging.debug("Exception in tearDownClass(self): %s" % e) + + @attr(hypervisor='XenServer') + def test_01_create_system_vms_on_managed_storage(self): + self._disable_zone_and_delete_system_vms(None, False) + + primary_storage = self.testdata[TestData.primaryStorage] + + primary_storage_1 = StoragePool.create( + self.apiClient, + primary_storage + ) + + self._prepare_to_use_managed_storage_for_system_vms() + + enabled = "Enabled" + + self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled) + + system_vms = self._wait_for_and_get_running_system_vms(2) + + virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=True + ) + + # This virtual machine was only created and started so that the virtual router would be created and started. + # Just delete this virtual machine once it has been created and started. + virtual_machine.delete(self.apiClient, True) + + virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0] + + system_vms.append(virtual_router) + + self._check_system_vms(system_vms, primary_storage_1.id) + + primary_storage[TestData.name] = TestData.get_name_for_solidfire_storage() + + primary_storage_2 = StoragePool.create( + self.apiClient, + primary_storage + ) + + StoragePool.enableMaintenance(self.apiClient, primary_storage_1.id) + + self._wait_for_storage_cleanup_thread(system_vms) + + sf_util.purge_solidfire_volumes(self.sfe) + + system_vms = self._wait_for_and_get_running_system_vms(2) + + virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0] + + system_vms.append(virtual_router) + + self._check_system_vms(system_vms, primary_storage_2.id) + + StoragePool.cancelMaintenance(self.apiClient, primary_storage_1.id) + + primary_storage_1.delete(self.apiClient) + + self._disable_zone_and_delete_system_vms(virtual_router) + + self._wait_for_storage_cleanup_thread(system_vms) + + sf_util.purge_solidfire_volumes(self.sfe) + + primary_storage_2.delete(self.apiClient) + + self._verify_no_active_solidfire_volumes() + + self._prepare_to_stop_using_managed_storage_for_system_vms() + + self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled) + + self._wait_for_and_get_running_system_vms(2) + + @attr(hypervisor='XenServer') + def test_02_failure_to_create_service_offering_with_customized_iops(self): + try: + ServiceOffering.create( + self.apiClient, + self.testdata[TestData.systemOfferingFailure] + ) + + self.assert_(True, "The service offering was created, but should not have been.") + except: + pass + + def _prepare_to_use_managed_storage_for_system_vms(self): + self._update_system_vm_unique_name(TestManagedSystemVMs._secondary_storage_unique_name, TestManagedSystemVMs._secondary_storage_temp_unique_name) + self._update_system_vm_unique_name(TestManagedSystemVMs._console_proxy_unique_name, TestManagedSystemVMs._console_proxy_temp_unique_name) + self._update_system_vm_unique_name(TestManagedSystemVMs._virtual_router_unique_name, TestManagedSystemVMs._virtual_router_temp_unique_name) + + self._update_system_vm_unique_name_based_on_uuid(self.secondary_storage_offering.id, 
TestManagedSystemVMs._secondary_storage_unique_name) + self._update_system_vm_unique_name_based_on_uuid(self.console_proxy_offering.id, TestManagedSystemVMs._console_proxy_unique_name) + self._update_system_vm_unique_name_based_on_uuid(self.virtual_router_offering.id, TestManagedSystemVMs._virtual_router_unique_name) + + def _prepare_to_stop_using_managed_storage_for_system_vms(self): + self._update_system_vm_unique_name_based_on_uuid(self.secondary_storage_offering.id, None) + self._update_system_vm_unique_name_based_on_uuid(self.console_proxy_offering.id, None) + self._update_system_vm_unique_name_based_on_uuid(self.virtual_router_offering.id, None) + + self._update_system_vm_unique_name(TestManagedSystemVMs._secondary_storage_temp_unique_name, TestManagedSystemVMs._secondary_storage_unique_name) + self._update_system_vm_unique_name(TestManagedSystemVMs._console_proxy_temp_unique_name, TestManagedSystemVMs._console_proxy_unique_name) + self._update_system_vm_unique_name(TestManagedSystemVMs._virtual_router_temp_unique_name, TestManagedSystemVMs._virtual_router_unique_name) + + def _wait_for_storage_cleanup_thread(self, system_vms): + retry_interval = 60 + num_tries = 10 + + wait_result, return_val = wait_until(retry_interval, num_tries, self._check_resource_state, system_vms) + + if not wait_result: + raise Exception(return_val) + + def _check_resource_state(self, system_vms): + try: + self._verify_system_vms_deleted(system_vms) + + return True, None + except: + return False, "The system is not in the necessary state." + + def _verify_system_vms_deleted(self, system_vms): + for system_vm in system_vms: + cs_root_volume = self._get_root_volume_for_system_vm(system_vm.id, 'Expunged') + + self._verify_managed_system_vm_deleted(cs_root_volume.name) + + def _disable_zone_and_delete_system_vms(self, virtual_router, verify_managed_system_vm_deleted=True): + self.zone.update(self.apiClient, id=self.zone.id, allocationstate="Disabled") + + if virtual_router is not None: + Router.destroy(self.apiClient, virtual_router.id) + + if verify_managed_system_vm_deleted: + cs_root_volume = self._get_root_volume_for_system_vm(virtual_router.id, 'Expunged') + + self._verify_managed_system_vm_deleted(cs_root_volume.name) + + # list_ssvms lists the secondary storage VM and the console proxy VM + system_vms = list_ssvms(self.apiClient) + + for system_vm in system_vms: + destroy_ssvm_cmd = destroySystemVm.destroySystemVmCmd() + + destroy_ssvm_cmd.id = system_vm.id + + self.apiClient.destroySystemVm(destroy_ssvm_cmd) + + if verify_managed_system_vm_deleted: + cs_root_volume = self._get_root_volume_for_system_vm(system_vm.id, 'Expunged') + + self._verify_managed_system_vm_deleted(cs_root_volume.name) + + def _verify_managed_system_vm_deleted(self, cs_root_volume_name): + sf_not_active_volumes = sf_util.get_not_active_sf_volumes(self.sfe) + + sf_root_volume = sf_util.check_and_get_sf_volume(sf_not_active_volumes, cs_root_volume_name, self) + + self.assertEqual( + len(sf_root_volume.volume_access_groups), + 0, + "The volume should not be in a volume access group." 
+ ) + + sr_name = sf_util.format_iqn(sf_root_volume.iqn) + + sf_util.check_xen_sr(sr_name, self.xen_session, self, False) + + def _wait_for_and_get_running_system_vms(self, expected_number_of_system_vms): + retry_interval = 60 + num_tries = 10 + + wait_result, return_val = wait_until(retry_interval, num_tries, self._check_number_of_running_system_vms, expected_number_of_system_vms) + + if not wait_result: + raise Exception(return_val) + + return return_val + + def _check_number_of_running_system_vms(self, expected_number_of_system_vms): + # list_ssvms lists the secondary storage VM and the console proxy VM + system_vms = list_ssvms(self.apiClient, state="Running") + + if system_vms is not None and len(system_vms) == expected_number_of_system_vms: + return True, system_vms + + return False, "Timed out waiting for running system VMs" + + def _verify_no_active_solidfire_volumes(self): + sf_active_volumes = sf_util.get_active_sf_volumes(self.sfe) + + sf_util.check_list(sf_active_volumes, 0, self, "There should be no active SolidFire volumes in the cluster.") + + def _check_system_vms(self, system_vms, primary_storage_id): + sf_active_volumes = sf_util.get_active_sf_volumes(self.sfe) + + sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, primary_storage_id, self) + + for system_vm in system_vms: + cs_root_volume = self._get_root_volume_for_system_vm(system_vm.id, 'Ready') + sf_root_volume = sf_util.check_and_get_sf_volume(sf_active_volumes, cs_root_volume.name, self) + + sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, cs_root_volume, self) + + sf_util.check_size_and_iops(sf_root_volume, cs_root_volume, sf_volume_size, self) + + self._check_iops_against_iops_of_system_offering(cs_root_volume, self.testdata[TestData.systemOffering]) + + sf_util.check_vag(sf_root_volume, sf_vag_id, self) + + sr_name = sf_util.format_iqn(sf_root_volume.iqn) + + sf_util.check_xen_sr(sr_name, self.xen_session, self) + + def _check_iops_against_iops_of_system_offering(self, cs_volume, system_offering): + self.assertEqual( + system_offering[TestData.min_iops], + cs_volume.miniops, + "Check QoS - Min IOPS: of " + cs_volume.name + " should be " + str(system_offering[TestData.min_iops]) + ) + + self.assertEqual( + system_offering[TestData.max_iops], + cs_volume.maxiops, + "Check QoS - Min IOPS: of " + cs_volume.name + " should be " + str(system_offering[TestData.max_iops]) + ) + + def _get_root_volume_for_system_vm(self, system_vm_id, state): + sql_query = "Select id From vm_instance Where uuid = '" + system_vm_id + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = self.dbConnection.execute(sql_query) + + instance_id = sql_result[0][0] + + sql_query = "Select uuid, name, min_iops, max_iops From volumes Where instance_id = " + str(instance_id) + \ + " and state = '" + state + "' Order by removed desc" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + sql_result = self.dbConnection.execute(sql_query) + + uuid = sql_result[0][0] + name = sql_result[0][1] + min_iops = sql_result[0][2] + max_iops = sql_result[0][3] + + class CloudStackVolume(object): + pass + + cs_volume = CloudStackVolume() + + cs_volume.id = uuid + cs_volume.name = name + cs_volume.miniops = min_iops + cs_volume.maxiops = max_iops + + return cs_volume + + def _update_system_vm_unique_name(self, unique_name, new_unique_name): + sql_query = 
"Update disk_offering set unique_name = '" + new_unique_name + "' Where unique_name = '" + unique_name + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + self.dbConnection.execute(sql_query) + + def _update_system_vm_unique_name_based_on_uuid(self, uuid, new_unique_name): + if (new_unique_name is None): + sql_query = "Update disk_offering set unique_name = NULL Where uuid = '" + uuid + "'" + else: + sql_query = "Update disk_offering set unique_name = '" + new_unique_name + "' Where uuid = '" + uuid + "'" + + # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench + self.dbConnection.execute(sql_query) + diff --git a/test/integration/plugins/solidfire/TestSnapshots.py b/test/integration/plugins/solidfire/TestSnapshots.py index 9ae10f335fe2..df45c6134d1d 100644 --- a/test/integration/plugins/solidfire/TestSnapshots.py +++ b/test/integration/plugins/solidfire/TestSnapshots.py @@ -21,6 +21,8 @@ import time import XenAPI +from solidfire.factory import ElementFactory + from util import sf_util # All tests inherit from cloudstackTestCase @@ -39,8 +41,6 @@ # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources, wait_until -from solidfire import solidfire_element_api as sf_api - # Prerequisites: # Only one zone # Only one pod @@ -57,7 +57,6 @@ class TestData(): diskOffering = "diskoffering" domainId = "domainId" hypervisor = "hypervisor" - login = "login" mvip = "mvip" password = "password" port = "port" @@ -67,7 +66,6 @@ class TestData(): solidFire = "solidfire" storageTag = "SolidFire_SAN_1" tags = "tags" - templateName = "templatename" url = "url" user = "user" username = "username" @@ -81,7 +79,7 @@ def __init__(self): self.testdata = { TestData.solidFire: { TestData.mvip: "192.168.139.112", - TestData.login: "admin", + TestData.username: "admin", TestData.password: "admin", TestData.port: 443, TestData.url: "https://192.168.139.112:443" @@ -223,7 +221,6 @@ def __init__(self): TestData.volume_2: { TestData.diskName: "test-volume-2", }, - TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, @@ -252,7 +249,9 @@ class TestSnapshots(cloudstackTestCase): def setUpClass(cls): # Set up API client testclient = super(TestSnapshots, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() cls.dbConnection = testclient.getDbConnection() cls.testdata = TestData().testdata @@ -269,12 +268,14 @@ def setUpClass(cls): cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) # Set up SolidFire connection - cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test 
account @@ -338,7 +339,7 @@ def tearDownClass(cls): cls.primary_storage.delete(cls.apiClient) - sf_util.purge_solidfire_volumes(cls.sf_client) + sf_util.purge_solidfire_volumes(cls.sfe) except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) @@ -377,14 +378,14 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -409,7 +410,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -418,7 +419,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): self._delete_and_test_snapshot(vol_snap_1) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -427,7 +428,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): self._delete_and_test_snapshot(vol_snap_2) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -454,14 +455,14 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -502,14 +503,14 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = 
sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -522,7 +523,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -530,7 +531,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) - sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) volume_created_from_snapshot = virtual_machine.attach_volume( self.apiClient, @@ -542,7 +543,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -562,7 +563,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): self._delete_and_test_snapshot(vol_snap_1) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -572,7 +573,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): virtual_machine_2.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -592,7 +593,7 @@ def test_01_create_volume_snapshot_using_sf_snapshot(self): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -625,21 +626,21 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = 
sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -665,7 +666,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -696,19 +697,19 @@ def test_02_create_volume_snapshot_using_sf_volume(self): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -750,19 +751,19 @@ def test_02_create_volume_snapshot_using_sf_volume(self): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) - sf_volume_id_2 = sf_volume_2['volumeID'] - sf_volume_size_2 = 
sf_volume_2['totalSize'] + sf_volume_id_2 = sf_volume_2.volume_id + sf_volume_size_2 = sf_volume_2.total_size vol_snap_a = self._create_and_test_snapshot_2(vm_2_root_volume.id, sf_volume_id_2, sf_volume_id + 5, primary_storage_db_id, sf_volume_size_2, sf_account_id, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg) @@ -774,7 +775,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg) @@ -782,7 +783,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) - sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) volume_created_from_snapshot = virtual_machine.attach_volume( self.apiClient, @@ -794,7 +795,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) @@ -815,7 +816,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): virtual_machine_2.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -833,7 +834,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -860,7 +861,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -870,14 +871,14 @@ def test_02_create_volume_snapshot_using_sf_volume(self): sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) # Get volume information from 
SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -888,21 +889,21 @@ def test_02_create_volume_snapshot_using_sf_volume(self): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name) - sf_util.check_list(sf_volume_2['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_2.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) volume_created_from_snapshot = virtual_machine.attach_volume( self.apiClient, volume_created_from_snapshot ) - sf_volume_id_2 = sf_volume_2['volumeID'] - sf_volume_size_2 = sf_volume_2['totalSize'] + sf_volume_id_2 = sf_volume_2.volume_id + sf_volume_size_2 = sf_volume_2.total_size vol_snap_a = self._create_and_test_snapshot_2(volume_created_from_snapshot.id, sf_volume_id_2, sf_volume_id + 3, primary_storage_db_id, sf_volume_size_2, sf_account_id, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg) @@ -914,7 +915,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -935,7 +936,7 @@ def test_02_create_volume_snapshot_using_sf_volume(self): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -981,21 +982,21 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage) - sf_volume_id = sf_volume['volumeID'] - sf_volume_size = sf_volume['totalSize'] + sf_volume_id = sf_volume.volume_id + sf_volume_size = 
sf_volume.total_size vol_snap_1 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 1, primary_storage_db_id, sf_volume_size, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg) @@ -1040,14 +1041,14 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1056,7 +1057,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) @@ -1096,14 +1097,14 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): vm_3_root_volume_name = vm_3_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg) sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, vm_3_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_3['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_3.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1112,7 +1113,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): volume_created_from_snapshot_a = Volume.create_from_snapshot(self.apiClient, vol_snap_b.id, services, account=self.account.name, domainid=self.domain.id) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg) @@ -1124,7 +1125,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) # should still be 7 volumes because the SolidFire volume for the root disk of the VM just destroyed # is still needed for the SolidFire snapshots @@ -1133,14 +1134,14 @@ def 
test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): virtual_machine_2.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg) virtual_machine_3.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg) @@ -1149,7 +1150,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg) @@ -1158,7 +1159,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): data_volume.delete(self.apiClient) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -1167,7 +1168,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): self._delete_and_test_snapshot(vol_snap_b) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) # should still be 2 volumes because the SolidFire volume for the root disk of the VM just destroyed # is still needed for the SolidFire snapshots @@ -1176,7 +1177,7 @@ def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self): self._delete_and_test_snapshot(vol_snap_a) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -1211,14 +1212,14 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1247,7 +1248,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): 
virtual_machine.delete(self.apiClient, True) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -1256,7 +1257,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): self._delete_and_test_archive_snapshot(vol_snap_1_archive) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) @@ -1265,7 +1266,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): self._delete_and_test_snapshot(vol_snap_2) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg) @@ -1292,14 +1293,14 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): vm_1_root_volume_name = vm_1_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg) sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID']) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume.volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1342,14 +1343,14 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): vm_2_root_volume_name = vm_2_root_volume.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg) sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) # Get snapshot information for volume from SolidFire cluster - sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID']) + sf_snapshots_2 = self.sfe.list_snapshots(volume_id=sf_volume_2.volume_id).snapshots sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1362,7 +1363,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): volume_created_from_snapshot_name = volume_created_from_snapshot.name # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg) @@ -1370,7 +1371,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self): sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name) sf_volume_3 = 
self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)

-        sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+        sf_util.check_list(sf_volume_3.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)

         volume_created_from_snapshot = virtual_machine.attach_volume(
             self.apiClient,
@@ -1382,7 +1383,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self):
         virtual_machine.delete(self.apiClient, True)

         # Get volume information from SolidFire cluster
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

         sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)

@@ -1404,7 +1405,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self):
         self._delete_and_test_snapshot(vol_snap_3)

         # Get volume information from SolidFire cluster
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

         sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)

@@ -1414,7 +1415,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self):
         virtual_machine_2.delete(self.apiClient, True)

         # Get volume information from SolidFire cluster
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

         sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)

@@ -1434,7 +1435,7 @@ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self):
         data_volume.delete(self.apiClient)

         # Get volume information from SolidFire cluster
-        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)

         sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)

@@ -1511,10 +1512,10 @@ def _most_recent_sf_snapshot(self, sf_snapshots):
         sf_snapshot_to_return = None

         for sf_snapshot in sf_snapshots:
-            if (sf_snapshot['snapshotID'] > most_recent_id):
+            if (sf_snapshot.snapshot_id > most_recent_id):
                 sf_snapshot_to_return = sf_snapshot

-                most_recent_id = sf_snapshot['snapshotID']
+                most_recent_id = sf_snapshot.snapshot_id

         if (sf_snapshot_to_return == None):
             raise Exception("Unable to find the most recent SolidFire snapshot in the provided list")
@@ -1541,7 +1542,7 @@ def _get_sf_volume_by_name(self, sf_volumes, sf_volume_name):
         sf_volume = None

         for volume in sf_volumes:
-            if volume['name'] == sf_volume_name:
+            if volume.name == sf_volume_name:
                 sf_volume = volume

                 break
@@ -1560,7 +1561,7 @@ def _get_sf_volume_by_id(self, sf_volumes, sf_volume_id):
         sf_volume = None

         for volume in sf_volumes:
-            if volume['volumeID'] == sf_volume_id:
+            if volume.volume_id == sf_volume_id:
                 sf_volume = volume

                 break
@@ -1582,7 +1583,7 @@ def _get_snapshot_detail(self, sf_snapshot_details_list, key):

     def _check_sf_snapshot_does_not_exist(self, sf_snapshots, sf_snapshot_id):
         for sf_snapshot in sf_snapshots:
-            if sf_snapshot["snapshotID"] == sf_snapshot:
+            if sf_snapshot.snapshot_id == sf_snapshot_id:
                 raise Exception("The following SolidFire snapshot ID should not exist: " + sf_snapshot_id)

     def _check_snapshot_details_do_not_exist(self, vol_snap_db_id):
@@ -1606,10 +1607,10 @@ def 
_create_and_test_snapshot(self, volume_id_for_snapshot, sf_volume, primary_s self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP) - sf_volume_id = sf_volume['volumeID'] + sf_volume_id = sf_volume.volume_id # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_id).snapshots sf_util.check_list(sf_snapshots, expected_num_snapshots, self, snapshot_err_msg) @@ -1621,16 +1622,16 @@ def _create_and_test_snapshot(self, volume_id_for_snapshot, sf_volume, primary_s vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap) - self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, sf_volume_id, sf_snapshot['snapshotID'], primary_storage_db_id, sf_volume['totalSize']) + self._check_snapshot_details(sf_snapshot_details, vol_snap_db_id, sf_volume_id, sf_snapshot.snapshot_id, primary_storage_db_id, sf_volume.total_size) return vol_snap # used when SolidFire snapshots are being used for CloudStack volume snapshots to create a backup on secondary storage def _create_and_test_archive_snapshot(self, volume_id_for_snapshot, sf_volume): - sf_volume_id = sf_volume['volumeID'] + sf_volume_id = sf_volume.volume_id # Get snapshot information for volume from SolidFire cluster - sf_snapshots_orig = self.sf_client.list_snapshots(sf_volume_id) + sf_snapshots_orig = self.sfe.list_snapshots(sf_volume_id).snapshots vol_snap = Snapshot.create( self.apiClient, @@ -1641,7 +1642,7 @@ def _create_and_test_archive_snapshot(self, volume_id_for_snapshot, sf_volume): self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(sf_volume_id) + sf_snapshots = self.sfe.list_snapshots(sf_volume_id).snapshots sf_util.check_list(sf_snapshots, len(sf_snapshots_orig), self, "A new SolidFire snapshot was detected.") @@ -1662,7 +1663,7 @@ def _create_and_test_snapshot_2(self, volume_id_for_snapshot, sf_volume_id, sf_v self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_id).snapshots sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg) @@ -1675,13 +1676,13 @@ def _create_and_test_snapshot_2(self, volume_id_for_snapshot, sf_volume_id, sf_v self._check_snapshot_details_2(sf_snapshot_details, vol_snap_db_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg) sf_volume_for_snapshot = self._get_sf_volume_by_id(sf_volumes, sf_volume_id_for_volume_snapshot) - sf_util.check_list(sf_volume_for_snapshot['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) + sf_util.check_list(sf_volume_for_snapshot.volume_access_groups, 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg) return vol_snap @@ -1722,7 +1723,7 @@ def _delete_and_test_snapshot(self, vol_snap): vol_snap.delete(self.apiClient) # Get snapshot information for volume from SolidFire cluster - sf_snapshots = 
self.sf_client.list_snapshots(volume_id=sf_volume_id) + sf_snapshots = self.sfe.list_snapshots(volume_id=sf_volume_id).snapshots self._check_sf_snapshot_does_not_exist(sf_snapshots, sf_snapshot_id) @@ -1741,6 +1742,6 @@ def _delete_and_test_snapshot_2(self, vol_snap, sf_account_id, expected_num_volu self._check_snapshot_details_do_not_exist(vol_snap_db_id) # Get volume information from SolidFire cluster - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg) diff --git a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py index 255df07a54a1..adbb44be9507 100644 --- a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py +++ b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py @@ -20,6 +20,8 @@ import SignedAPICall import XenAPI +from solidfire.factory import ElementFactory + from util import sf_util # All tests inherit from cloudstackTestCase @@ -34,8 +36,6 @@ # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources -from solidfire import solidfire_element_api as sf_api - # Prerequisites: # Only one zone # Only one pod @@ -71,7 +71,6 @@ class TestData(): storageTag2 = "SolidFire_Volume_1" tags = "tags" templateCacheName = "centos56-x86-64-xen" - templateName = "templatename" testAccount = "testaccount" url = "url" user = "user" @@ -86,7 +85,7 @@ def __init__(self): self.testdata = { TestData.solidFire: { TestData.mvip: "192.168.139.112", - TestData.login: "admin", + TestData.username: "admin", TestData.password: "admin", TestData.port: 443, TestData.url: "https://192.168.139.112:443" @@ -208,7 +207,6 @@ def __init__(self): TestData.volume_1: { TestData.diskName: "test-volume", }, - TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", TestData.zoneId: 1, TestData.clusterId1: 1, TestData.clusterId2: 2, @@ -224,7 +222,9 @@ class TestVMMigrationWithStorage(cloudstackTestCase): def setUpClass(cls): # Set up API client testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() cls.dbConnection = testclient.getDbConnection() cls.testdata = TestData().testdata @@ -250,13 +250,15 @@ def setUpClass(cls): cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) # Set up SolidFire connection - cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0] cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0] - cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -330,6 +332,8 @@ def tearDownClass(cls): cleanup_resources(cls.apiClient, cls._cleanup) 
cls.primary_storage.delete(cls.apiClient) + + sf_util.purge_solidfire_volumes(cls.sfe) except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) @@ -340,7 +344,7 @@ def tearDown(self): try: cleanup_resources(self.apiClient, self.cleanup) - sf_util.purge_solidfire_volumes(self.sf_client) + sf_util.purge_solidfire_volumes(self.sfe) except Exception as e: logging.debug("Exception in tearDownClass(self): %s" % e) @@ -366,7 +370,7 @@ def test_01_storage_migrate_root_and_data_disks(self): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg) - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self) @@ -386,7 +390,7 @@ def test_01_storage_migrate_root_and_data_disks(self): cs_data_volume ) - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self) @@ -451,7 +455,7 @@ def test_02_storage_migrate_root_and_data_disks(self): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg) - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self) @@ -497,7 +501,7 @@ def _execute_migration_failure(self, compute_offering_id, disk_offering_id): sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg) - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self) @@ -517,7 +521,7 @@ def _execute_migration_failure(self, compute_offering_id, disk_offering_id): cs_data_volume ) - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self) @@ -549,7 +553,7 @@ def _migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_dat cs_root_volume = self._get_updated_cs_volume(cs_root_volume.id) cs_data_volume = self._get_updated_cs_volume(cs_data_volume.id) - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self) dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self) @@ -580,7 +584,7 @@ def _migrate_and_verify_one_disk_only(self, virtual_machine, dest_host, cs_volum cs_volume = self._get_updated_cs_volume(cs_volume.id) - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) dest_sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self) @@ -624,7 +628,7 @@ 
def __init__(self, *args, **kwargs): self._verifyFields(cs_root_volume_refreshed, src_sf_root_volume) self._verifyFields(cs_data_volume_refreshed, src_sf_data_volume) - sf_volumes = sf_util.get_not_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_not_active_sf_volumes(self.sfe, sf_account_id) dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self) dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self) @@ -633,11 +637,11 @@ def __init__(self, *args, **kwargs): self._verify_xenserver_state(dest_xen_session, dest_sf_data_volume, src_xen_session, src_sf_data_volume) def _verify_different_volume_access_groups(self, src_sf_volume, dest_sf_volume): - src_vags = src_sf_volume['volumeAccessGroups'] + src_vags = src_sf_volume.volume_access_groups sf_util.check_list(src_vags, 1, self, "'src_vags' should be a list with only one element in it.") - dest_vags = dest_sf_volume['volumeAccessGroups'] + dest_vags = dest_sf_volume.volume_access_groups sf_util.check_list(dest_vags, 1, self, "'dest_vags' should be a list with only one element in it.") @@ -647,23 +651,23 @@ def _get_updated_cs_volume(self, cs_volume_id): return list_volumes(self.apiClient, listall=True, id=cs_volume_id)[0] def _verify_same_account(self, src_sf_volume, dest_sf_volume): - self.assertEqual(src_sf_volume['accountID'], dest_sf_volume['accountID'], "The source and destination volumes should be in the same SolidFire account.") + self.assertEqual(src_sf_volume.account_id, dest_sf_volume.account_id, "The source and destination volumes should be in the same SolidFire account.") def _verifySfVolumeIds(self, src_sf_volume, dest_sf_volume): - self.assert_(src_sf_volume['volumeID'] < dest_sf_volume['volumeID'], + self.assert_(src_sf_volume.volume_id < dest_sf_volume.volume_id, "The destination SolidFire root volume's ID should be greater than the id of the source one.") # verify the name, folder, and iscsi_name def _verifyFields(self, cs_volume, sf_volume): - self.assert_(cs_volume.name == sf_volume['name'], "The CloudStack volume name does not match the SolidFire volume name.") + self.assert_(cs_volume.name == sf_volume.name, "The CloudStack volume name does not match the SolidFire volume name.") cs_volume_folder = self._get_cs_volume_folder(cs_volume.id) - self.assert_(int(cs_volume_folder) == sf_volume['volumeID'], "The CloudStack folder name does not match the SolidFire volume ID.") + self.assert_(int(cs_volume_folder) == sf_volume.volume_id, "The CloudStack folder name does not match the SolidFire volume ID.") cs_volume_iscsi_name = self._get_cs_volume_iscsi_name(cs_volume.id) - self.assert_(cs_volume_iscsi_name == sf_util.format_iqn(sf_volume['iqn']), "The CloudStack volume iscsi_name does not match the SolidFire volume IQN.") + self.assert_(cs_volume_iscsi_name == sf_util.format_iqn(sf_volume.iqn), "The CloudStack volume iscsi_name does not match the SolidFire volume IQN.") def _get_cs_volume_property(self, cs_volume_id, volume_property): sql_query = "Select " + volume_property + " From volumes Where uuid = '" + cs_volume_id + "'" @@ -688,10 +692,10 @@ def _verify_no_basic_volume_details(self): sf_util.check_list(sql_result, 0, self, "The cloud.volume_details table should not have any name fields that start with 'basic_'.") def _verify_xenserver_state(self, xen_session_1, sf_volume_1, xen_session_2, sf_volume_2): - sr_name = sf_util.format_iqn(sf_volume_1["iqn"]) + sr_name = sf_util.format_iqn(sf_volume_1.iqn) 
sf_util.check_xen_sr(sr_name, xen_session_1, self, False) - sr_name = sf_util.format_iqn(sf_volume_2["iqn"]) + sr_name = sf_util.format_iqn(sf_volume_2.iqn) sf_util.check_xen_sr(sr_name, xen_session_2, self) diff --git a/test/integration/plugins/solidfire/TestVMSnapshots.py b/test/integration/plugins/solidfire/TestVMSnapshots.py index 14e8e71f7896..db2539025dd8 100644 --- a/test/integration/plugins/solidfire/TestVMSnapshots.py +++ b/test/integration/plugins/solidfire/TestVMSnapshots.py @@ -20,6 +20,8 @@ import SignedAPICall import XenAPI +from solidfire.factory import ElementFactory + from util import sf_util # All tests inherit from cloudstackTestCase @@ -36,8 +38,6 @@ # utils - utility classes for common cleanup, external library wrappers, etc. from marvin.lib.utils import cleanup_resources -from solidfire import solidfire_element_api as sf_api - # Prerequisites: # Only one zone # Only one pod @@ -63,7 +63,6 @@ class TestData: solidFire = "solidfire" storageTag = "SolidFire_SAN_1" tags = "tags" - templateName = "templatename" url = "url" user = "user" username = "username" @@ -76,7 +75,7 @@ def __init__(self): self.testdata = { TestData.solidFire: { TestData.mvip: "192.168.139.112", - TestData.login: "admin", + TestData.username: "admin", TestData.password: "admin", TestData.port: 443, TestData.url: "https://192.168.139.112:443" @@ -211,7 +210,6 @@ def __init__(self): "volume2": { "diskname": "testvolume2", }, - TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, @@ -237,7 +235,9 @@ class TestVMSnapshots(cloudstackTestCase): def setUpClass(cls): # Set up API client testclient = super(TestVMSnapshots, cls).getClsTestClient() + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() cls.testdata = TestData().testdata @@ -252,11 +252,13 @@ def setUpClass(cls): cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) # Set up SolidFire connection - cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) - template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -332,7 +334,7 @@ def tearDownClass(cls): cls.primary_storage.delete(cls.apiClient) - sf_util.purge_solidfire_volumes(cls.sf_client) + sf_util.purge_solidfire_volumes(cls.sfe) except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) diff --git a/test/integration/plugins/solidfire/TestVolumes.py b/test/integration/plugins/solidfire/TestVolumes.py index 63b9be116046..b70ac915beeb 100644 --- a/test/integration/plugins/solidfire/TestVolumes.py +++ b/test/integration/plugins/solidfire/TestVolumes.py @@ -20,6 +20,8 @@ import SignedAPICall import XenAPI +from solidfire.factory import ElementFactory + from util import sf_util # All tests inherit from cloudstackTestCase @@ -39,8 +41,6 @@ # utils - utility classes for common cleanup, external library wrappers, etc. 
from marvin.lib.utils import cleanup_resources -from solidfire import solidfire_element_api as sf_api - # Prerequisites: # Only one zone # Only one pod @@ -71,7 +71,6 @@ class TestData(): storageTag = "SolidFire_SAN_1" tags = "tags" templateCacheName = "centos56-x86-64-xen" - templateName = "templatename" testAccount = "testaccount" url = "url" user = "user" @@ -87,7 +86,7 @@ def __init__(self): self.testdata = { TestData.solidFire: { TestData.mvip: "192.168.139.112", - TestData.login: "admin", + TestData.username: "admin", TestData.password: "admin", TestData.port: 443, TestData.url: "https://192.168.139.112:443" @@ -168,7 +167,6 @@ def __init__(self): TestData.volume_2: { TestData.diskName: "test-volume-2", }, - TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)", TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, @@ -192,6 +190,7 @@ def setUpClass(cls): # Set up API client testclient = super(TestVolumes, cls).getClsTestClient() cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() cls.dbConnection = testclient.getDbConnection() cls.testdata = TestData().testdata @@ -212,12 +211,14 @@ def setUpClass(cls): cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password]) # Set up SolidFire connection - cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire]) + solidfire = cls.testdata[TestData.solidFire] + + cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password]) # Get Resources from Cloud Infrastructure cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) cls.cluster = list_clusters(cls.apiClient)[0] - cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName]) + cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"]) cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) # Create test account @@ -304,7 +305,7 @@ def tearDownClass(cls): cls.primary_storage.delete(cls.apiClient) - sf_util.purge_solidfire_volumes(cls.sf_client) + sf_util.purge_solidfire_volumes(cls.sfe) except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) @@ -328,16 +329,16 @@ def test_00_check_template_cache(self): sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, TestData.templateCacheName, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, "The volume should not be in a VAG." ) - sf_account_id = sf_volume["accountID"] + sf_account_id = sf_volume.account_id - sf_account = self.sf_client.get_account_by_id(sf_account_id)["account"] + sf_account = self.sfe.get_account_by_id(sf_account_id).account - sf_account_name = sf_account["username"] + sf_account_name = sf_account.username self.assertEqual( sf_account_name.endswith("_1"), @@ -504,7 +505,7 @@ def test_02_attach_detach_attach_volume(self): sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, "The volume should not be in a VAG." 
) @@ -723,7 +724,7 @@ def test_04_detach_volume_reboot(self): sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, TestVolumes._volume_should_not_be_in_a_vag ) @@ -747,7 +748,7 @@ def test_04_detach_volume_reboot(self): sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, TestVolumes._volume_should_not_be_in_a_vag ) @@ -847,7 +848,7 @@ def test_05_detach_vol_stopped_VM_start(self): sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, TestVolumes._volume_should_not_be_in_a_vag ) @@ -871,7 +872,7 @@ def test_05_detach_vol_stopped_VM_start(self): sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, TestVolumes._volume_should_not_be_in_a_vag ) @@ -1075,7 +1076,7 @@ def test_07_destroy_expunge_VM_with_volume(self): sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, TestVolumes._volume_should_not_be_in_a_vag ) @@ -1182,7 +1183,7 @@ def test_08_delete_volume_was_attached(self): sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self) self.assertEqual( - len(sf_volume['volumeAccessGroups']), + len(sf_volume.volume_access_groups), 0, TestVolumes._volume_should_not_be_in_a_vag ) @@ -1489,7 +1490,7 @@ def _check_xen_sr(self, xen_sr_name, should_exist=True): sf_util.check_xen_sr(xen_sr_name, self.xen_session, self, should_exist) def _get_active_sf_volumes(self, sf_account_id=None): - sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id) + sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id) self.assertNotEqual( len(sf_volumes), diff --git a/test/integration/plugins/solidfire/util/sf_util.py b/test/integration/plugins/solidfire/util/sf_util.py index 662957103330..1b451d5639f4 100644 --- a/test/integration/plugins/solidfire/util/sf_util.py +++ b/test/integration/plugins/solidfire/util/sf_util.py @@ -68,20 +68,20 @@ def _set_supports_resign_for_table(supports_resign, db_connection, table): # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench db_connection.execute(sql_query) -def purge_solidfire_volumes(sf_client): - deleted_volumes = sf_client.list_deleted_volumes() +def purge_solidfire_volumes(sfe): + deleted_volumes = sfe.list_deleted_volumes() - for deleted_volume in deleted_volumes: - sf_client.purge_deleted_volume(deleted_volume['volumeID']) + for deleted_volume in deleted_volumes.volumes: + sfe.purge_deleted_volume(deleted_volume.volume_id) -def get_not_active_sf_volumes(sf_client, sf_account_id=None): +def get_not_active_sf_volumes(sfe, sf_account_id=None): if sf_account_id is not None: - sf_volumes = sf_client.list_volumes_for_account(sf_account_id) + sf_volumes = sfe.list_volumes_for_account(sf_account_id).volumes if sf_volumes is not None and len(sf_volumes) > 0: sf_volumes = _get_not_active_sf_volumes_only(sf_volumes) else: - sf_volumes = sf_client.list_deleted_volumes() + sf_volumes = sfe.list_deleted_volumes().volumes return sf_volumes @@ -89,19 +89,19 @@ def 
_get_not_active_sf_volumes_only(sf_volumes):
     not_active_sf_volumes_only = []

     for sf_volume in sf_volumes:
-        if sf_volume["status"] != "active":
+        if sf_volume.status != "active":
             not_active_sf_volumes_only.append(sf_volume)

     return not_active_sf_volumes_only

-def get_active_sf_volumes(sf_client, sf_account_id=None):
+def get_active_sf_volumes(sfe, sf_account_id=None):
     if sf_account_id is not None:
-        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)
+        sf_volumes = sfe.list_volumes_for_account(sf_account_id).volumes

         if sf_volumes is not None and len(sf_volumes) > 0:
             sf_volumes = _get_active_sf_volumes_only(sf_volumes)
     else:
-        sf_volumes = sf_client.list_active_volumes()
+        sf_volumes = sfe.list_active_volumes().volumes

     return sf_volumes

@@ -109,7 +109,7 @@ def _get_active_sf_volumes_only(sf_volumes):
     active_sf_volumes_only = []

     for sf_volume in sf_volumes:
-        if sf_volume["status"] == "active":
+        if sf_volume.status == "active":
             active_sf_volumes_only.append(sf_volume)

     return active_sf_volumes_only
@@ -118,7 +118,7 @@ def check_and_get_sf_volume(sf_volumes, sf_volume_name, obj_assert, should_exist
     sf_volume = None

     for volume in sf_volumes:
-        if volume['name'] == sf_volume_name:
+        if volume.name == sf_volume_name:
             sf_volume = volume
             break

@@ -155,13 +155,13 @@ def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):

 def check_vag(sf_volume, sf_vag_id, obj_assert):
     obj_assert.assertEqual(
-        len(sf_volume['volumeAccessGroups']),
+        len(sf_volume.volume_access_groups),
         1,
         "The volume should only be in one VAG."
     )

     obj_assert.assertEqual(
-        sf_volume['volumeAccessGroups'][0],
+        sf_volume.volume_access_groups[0],
         sf_vag_id,
         "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
     )
@@ -185,21 +185,21 @@ def format_iqn(iqn):

 def check_size_and_iops(sf_volume, cs_volume, size, obj_assert):
     obj_assert.assertEqual(
-        sf_volume['qos']['minIOPS'],
+        sf_volume.qos.min_iops,
         cs_volume.miniops,
-        "Check QoS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
+        "Check QoS - Min IOPS: " + str(sf_volume.qos.min_iops)
     )

     obj_assert.assertEqual(
-        sf_volume['qos']['maxIOPS'],
+        sf_volume.qos.max_iops,
         cs_volume.maxiops,
-        "Check QoS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
+        "Check QoS - Max IOPS: " + str(sf_volume.qos.max_iops)
     )

     obj_assert.assertEqual(
-        sf_volume['totalSize'],
+        sf_volume.total_size,
         size,
-        "Check SolidFire volume size: " + str(sf_volume['totalSize'])
+        "Check SolidFire volume size: " + str(sf_volume.total_size)
     )

 def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert):

From 16913a982253a5b658ca8ee7a0c8939f13505d45 Mon Sep 17 00:00:00 2001
From: Rohit Yadav
Date: Tue, 30 Aug 2016 15:55:00 +0530
Subject: [PATCH 034/687] CLOUDSTACK-9842: Make UI JSP Free

We use some JSP files just for translation of strings in the UI. This
is achievable purely in JavaScript. This removes those JSPs and
simplifies translation usage and workflow (purely JS based). The l10n
JS (dictionary) files are generated from the existing
messages.properties files during the client UI code-generation phase.
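As a rough sketch of the workflow this introduces (the "var dictionary = {...};" output format comes from the gen-l10n.py script added below; the file name, keys, and the lookup helper are illustrative assumptions, not part of this patch), a messages_fr_FR.properties file containing label.ok=OK and label.cancel=Annuler would be compiled into ui/l10n/fr_FR.js along these lines:

    // ui/l10n/fr_FR.js -- generated; contents are illustrative
    var dictionary = {"label.cancel":"Annuler",
    "label.ok":"OK"};

    // A page can then resolve a key in plain JavaScript instead of a JSP tag
    // (this helper is an assumption for illustration only):
    function translate(key) {
        return dictionary[key] || key; // fall back to the raw key if untranslated
    }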
Signed-off-by: Rohit Yadav --- client/WEB-INF/web.xml | 2 +- client/pom.xml | 26 + client/tomcatconf/web.xml.in | 1 - packaging/centos7/tomcat7/web.xml | 1 - pom.xml | 7 +- scripts/installer/windows/client.wxs | 6 - server/pom.xml | 10 - tools/transifex/gen-l10n.py | 106 +++ ui/dictionary.jsp | 1158 -------------------------- ui/dictionary2.jsp | 1131 ------------------------- ui/{error.jsp => error.html} | 4 +- ui/{index.jsp => index.html} | 675 ++++++++------- 12 files changed, 495 insertions(+), 2632 deletions(-) create mode 100644 tools/transifex/gen-l10n.py delete mode 100644 ui/dictionary.jsp delete mode 100644 ui/dictionary2.jsp rename ui/{error.jsp => error.html} (99%) rename ui/{index.jsp => index.html} (74%) diff --git a/client/WEB-INF/web.xml b/client/WEB-INF/web.xml index a384f0647946..73a755b501f6 100644 --- a/client/WEB-INF/web.xml +++ b/client/WEB-INF/web.xml @@ -80,7 +80,7 @@ java.lang.Exception - /error.jsp + /error.html diff --git a/client/pom.xml b/client/pom.xml index f6d6fdce3095..5787717be2e2 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -390,6 +390,32 @@ + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + generate-sources + generate-sources + + exec + + + ${basedir}/marvin + python + + ${basedir}/../tools/transifex/gen-l10n.py + -i + ${basedir}/WEB-INF/classes/resources/ + -o + ${basedir}/../ui/l10n/ + Generating JS localization + + + + + ru.concerteza.buildnumber maven-jgit-buildnumber-plugin diff --git a/client/tomcatconf/web.xml.in b/client/tomcatconf/web.xml.in index 44b6eab07fe4..8645a64da424 100644 --- a/client/tomcatconf/web.xml.in +++ b/client/tomcatconf/web.xml.in @@ -1182,7 +1182,6 @@ index.html index.htm - index.jsp diff --git a/packaging/centos7/tomcat7/web.xml b/packaging/centos7/tomcat7/web.xml index 44b6eab07fe4..8645a64da424 100644 --- a/packaging/centos7/tomcat7/web.xml +++ b/packaging/centos7/tomcat7/web.xml @@ -1182,7 +1182,6 @@ index.html index.htm - index.jsp diff --git a/pom.xml b/pom.xml index e68191134e23..d0fbbd52d7f2 100644 --- a/pom.xml +++ b/pom.xml @@ -129,7 +129,6 @@ 9.2.15.v20160210 3.1.4 2.4.6 - 9.2.15.v20160210 10.1 @@ -222,11 +221,6 @@ - - org.eclipse.jetty - apache-jsp - ${cs.apache-jsp.version} - org.codehaus.groovy groovy-all @@ -876,6 +870,7 @@ tools/devcloud/src/deps/boxes/basebox-build/preseed.cfg tools/marvin/Marvin.egg-info/* ui/css/token-input-facebook.css + ui/l10n/* ui/lib/flot/jquery.colorhelpers.js ui/lib/flot/jquery.flot.crosshair.js ui/lib/flot/jquery.flot.fillbetween.js diff --git a/scripts/installer/windows/client.wxs b/scripts/installer/windows/client.wxs index f5aec48bde49..91b15fbd950b 100644 --- a/scripts/installer/windows/client.wxs +++ b/scripts/installer/windows/client.wxs @@ -12,12 +12,6 @@ - - - - - - diff --git a/server/pom.xml b/server/pom.xml index ae3b7387f462..e379fc61f0c8 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -69,16 +69,6 @@ javax.mail mail - - jstl - jstl - ${cs.jstl.version} - - - javax.servlet.jsp.jstl - javax.servlet.jsp.jstl-api - ${cs.jstl-api.version} - commons-codec commons-codec diff --git a/tools/transifex/gen-l10n.py b/tools/transifex/gen-l10n.py new file mode 100644 index 000000000000..0f1616bea3f2 --- /dev/null +++ b/tools/transifex/gen-l10n.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Usage: python gen-l10n.py + +import codecs +import json +import os +import sys +from optparse import OptionParser + + +def generateL10nFile(propFile, outputFile): + ts = {} + with open(propFile, 'r') as f: + for line in f.read().split('\n'): + if line.startswith('#') or line.startswith('\n') or line.startswith('\r') or line.strip() == "": + continue + key, _, value = line.partition('=') + if key in ts: + print("[Warning] Found a duplicate translation for key " + key) + value = value.replace('\#', '#') \ + .replace('\=', '=') \ + .replace('\!', '!') \ + .replace('\:', ':') \ + .replace('\+', '+') \ + .replace('\,', ',') \ + .replace('\>', '>') \ + .replace('\<', '<') \ + .replace('\\>', '>') \ + .replace('\\<', '<') \ + .replace('\\,', ',') \ + .replace('\\ ', ' ') \ + .replace('\\+', '+') \ + .replace('\\\\', '') \ + .decode('unicode-escape') + ts[key] = value + + print("Exporting compiled dictionary: %s" % outputFile) + with codecs.open(outputFile, "w", "utf-8") as f: + f.write("// Licensed to the Apache Software Foundation (ASF) under one\n") + f.write("// or more contributor license agreements. See the NOTICE file\n") + f.write("// distributed with this work for additional information\n") + f.write("// regarding copyright ownership. The ASF licenses this file\n") + f.write("// to you under the Apache License, Version 2.0 (the\n") + f.write("// \"License\"); you may not use this file except in compliance\n") + f.write("// with the License. You may obtain a copy of the License at\n") + f.write("//\n") + f.write("// http://www.apache.org/licenses/LICENSE-2.0\n") + f.write("//\n") + f.write("// Unless required by applicable law or agreed to in writing,\n") + f.write("// software distributed under the License is distributed on an\n") + f.write("// \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n") + f.write("// KIND, either express or implied. 
See the License for the\n") + f.write("// specific language governing permissions and limitations\n") + f.write("// under the License.\n") + f.write("var dictionary = ") + f.write(json.dumps(ts, ensure_ascii=False, separators=(',\n', ':',), sort_keys=True)) + f.write(";") + + +def parseFileName(propFileName): + return propFileName.split('messages_')[-1] \ + .replace('properties', 'js') \ + .replace('messages.js', 'en.js') + + +def main(): + parser = OptionParser() + parser.add_option("-o", "--output", dest="outputDir", + help="The path to the generated l10n js file") + + parser.add_option("-i", "--input", dest="inputDir", + help="The path to source messages properties files") + + (options, args) = parser.parse_args() + if options.inputDir is None or options.outputDir is None: + print("Please provide messages and l10n output directory paths") + sys.exit(1) + + if not os.path.exists(options.outputDir): + os.makedirs(options.outputDir) + + for propFile in os.listdir(options.inputDir): + inputFile = "%s/%s" % (options.inputDir, propFile) + outputFile = "%s/%s" % (options.outputDir, parseFileName(propFile)) + generateL10nFile(inputFile, outputFile) + + +if __name__ == "__main__": + main() diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp deleted file mode 100644 index c15dae2ebdf5..000000000000 --- a/ui/dictionary.jsp +++ /dev/null @@ -1,1158 +0,0 @@ -<%-- -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. ---%> -<%@ taglib uri="http://java.sun.com/jsp/jstl/core" prefix="c" %> -<%@ taglib uri="http://java.sun.com/jsp/jstl/fmt" prefix="fmt" %> - - - - - -<%-- -**** -NOTE -**** - -Please use dictionary2.jsp for all new mappings. This is due to -file size constraints for JSP files. - -If you add anything else to this file, an error might occur at runtime! ---%> - -<% long now = System.currentTimeMillis(); %> - diff --git a/ui/dictionary2.jsp b/ui/dictionary2.jsp deleted file mode 100644 index f919b156d72d..000000000000 --- a/ui/dictionary2.jsp +++ /dev/null @@ -1,1131 +0,0 @@ -<%-- -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. 
---%> -<%@ taglib uri="http://java.sun.com/jsp/jstl/core" prefix="c" %> -<%@ taglib uri="http://java.sun.com/jsp/jstl/fmt" prefix="fmt" %> - - - - -<% long now = System.currentTimeMillis(); %> - diff --git a/ui/error.jsp b/ui/error.html similarity index 99% rename from ui/error.jsp rename to ui/error.html index 9fecfb730c7d..130fa6ed4576 100644 --- a/ui/error.jsp +++ b/ui/error.html @@ -1,4 +1,4 @@ -<%-- + diff --git a/ui/index.jsp b/ui/index.html similarity index 74% rename from ui/index.jsp rename to ui/index.html index 34f6310f9c94..9402153937a4 100644 --- a/ui/index.jsp +++ b/ui/index.html @@ -1,5 +1,4 @@ - -<%-- + @@ -34,13 +26,10 @@ - - - - + @@ -54,52 +43,35 @@
[The remaining hunks of the ui/error.jsp -> ui/error.html and ui/index.jsp -> ui/index.html renames are garbled in this copy: the HTML markup was stripped during extraction, leaving only diff markers, stray attribute fragments, and the wizard step labels (1-8). Per the commit message and the surviving hunk headers, these hunks replace the JSP taglib directives and <fmt:message> translation tags with plain HTML whose strings are resolved client-side; a sketch of that direction follows below.]
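To make the direction of those garbled hunks concrete, here is a minimal sketch assuming markup of the kind the commit message describes (the data-translate attribute and the loop are illustrative assumptions; the actual index.html markup is not recoverable from this copy). A server-rendered JSP tag such as <fmt:message key="label.login"/> gives way to static markup whose text is filled in client-side from the generated dictionary:

    // Illustrative only -- not the patch's actual markup or helper.
    // Before (JSP, rendered on the server):
    //   <label><fmt:message key="label.login"/></label>
    // After (static HTML, translated in the browser):
    //   <label data-translate="label.login"></label>
    var nodes = document.querySelectorAll('[data-translate]');
    for (var i = 0; i < nodes.length; i++) {
        var key = nodes[i].getAttribute('data-translate');
        nodes[i].textContent = dictionary[key] || key;
    }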