From b792df163a1953772aa9a4c293b87cba5826a953 Mon Sep 17 00:00:00 2001
From: nvazquez
Date: Wed, 22 Feb 2017 15:54:02 -0300
Subject: [PATCH 1/2] Fix for test_snapshots.py using nfs2 instead of nfs template

---
 test/integration/smoke/test_snapshots.py | 2 +-
 tools/marvin/marvin/config/test_data.py  | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py
index a6230bc4940b..e212e3e97831 100644
--- a/test/integration/smoke/test_snapshots.py
+++ b/test/integration/smoke/test_snapshots.py
@@ -275,7 +275,7 @@ def test_02_list_snapshots_with_removed_data_store(self):
         assert isinstance(clusters,list) and len(clusters)>0
 
         storage = StoragePool.create(self.apiclient,
-                                     self.services["nfs"],
+                                     self.services["nfs2"],
                                      clusterid=clusters[0].id,
                                      zoneid=self.zone.id,
                                      podid=self.pod.id
diff --git a/tools/marvin/marvin/config/test_data.py b/tools/marvin/marvin/config/test_data.py
index c4d1aed3dafd..215cdaecc3f9 100644
--- a/tools/marvin/marvin/config/test_data.py
+++ b/tools/marvin/marvin/config/test_data.py
@@ -962,6 +962,10 @@
         "url": "nfs://nfs/export/automation/1/testprimary",
         "name": "Primary XEN"
     },
+    "nfs2": {
+        "url": "nfs://nfs/export/automation/1/testprimary2",
+        "name": "Primary XEN 2"
+    },
     "iscsi": {
         "url": "iscsi://192.168.100.21/iqn.2012-01.localdomain.clo-cstack-cos6:iser/1",

From c66df6e11f766844c4566543eb4b95945b2e7873 Mon Sep 17 00:00:00 2001
From: nvazquez
Date: Wed, 1 Mar 2017 12:17:59 -0300
Subject: [PATCH 2/2] Fix for test failure

---
 .../PrimaryDataStoreProviderManagerImpl.java |  2 +-
 test/integration/smoke/test_snapshots.py     | 81 ++++++++++++++-----
 2 files changed, 63 insertions(+), 20 deletions(-)

diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java
index 49bcb5b69811..b799c8be389d 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/manager/PrimaryDataStoreProviderManagerImpl.java
@@ -56,7 +56,7 @@ public void config() {
 
     @Override
     public PrimaryDataStore getPrimaryDataStore(long dataStoreId) {
-        StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId);
+        StoragePoolVO dataStoreVO = dataStoreDao.findByIdIncludingRemoved(dataStoreId);
         if (dataStoreVO == null) {
             throw new CloudRuntimeException("Unable to locate datastore with id " + dataStoreId);
         }
diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py
index e212e3e97831..57612627f270 100644
--- a/test/integration/smoke/test_snapshots.py
+++ b/test/integration/smoke/test_snapshots.py
@@ -19,14 +19,16 @@
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
 from marvin.lib.utils import (cleanup_resources,
-                              is_snapshot_on_nfs)
+                              is_snapshot_on_nfs,
+                              validateList)
 from marvin.lib.base import (VirtualMachine,
                              Account,
                              Template,
                              ServiceOffering,
                              Snapshot,
                              StoragePool,
-                             Volume)
+                             Volume,
+                             DiskOffering)
 from marvin.lib.common import (get_domain,
                                get_template,
                                get_zone,
@@ -36,6 +38,7 @@
                                list_storage_pools,
                                list_clusters)
 from marvin.lib.decoratorGenerators import skipTestIf
+from marvin.codes import PASS
 
 
 class Templates:
@@ -134,6 +137,10 @@ def setUpClass(cls):
             cls.apiclient,
             cls.services["service_offerings"]["tiny"]
         )
+        cls.disk_offering = DiskOffering.create(
+            cls.apiclient,
+            cls.services["disk_offering"]
+        )
         cls.virtual_machine = cls.virtual_machine_with_disk = \
             VirtualMachine.create(
                 cls.apiclient,
@@ -149,6 +156,7 @@ def setUpClass(cls):
         cls._cleanup.append(cls.service_offering)
         cls._cleanup.append(cls.account)
         cls._cleanup.append(cls.template)
+        cls._cleanup.append(cls.disk_offering)
         return
 
     @classmethod
@@ -267,7 +275,35 @@ def test_02_list_snapshots_with_removed_data_store(self):
         """Test listing volume snapshots with removed data stores
         """
 
-        # 1) Create new Primary Storage
+        # 1 - Create new volume -> V
+        # 2 - Create new Primary Storage -> PS
+        # 3 - Attach and detach volume V from vm
+        # 4 - Migrate volume V to PS
+        # 5 - Take volume V snapshot -> S
+        # 6 - List snapshot and verify it gets properly listed although Primary Storage was removed
+
+        # Create new volume
+        vol = Volume.create(
+            self.apiclient,
+            self.services["volume"],
+            diskofferingid=self.disk_offering.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid,
+        )
+        self.cleanup.append(vol)
+        self.assertIsNotNone(vol, "Failed to create volume")
+        vol_res = Volume.list(
+            self.apiclient,
+            id=vol.id
+        )
+        self.assertEqual(
+            validateList(vol_res)[0],
+            PASS,
+            "Invalid response returned for list volumes")
+        vol_uuid = vol_res[0].id
+
+        # Create new Primary Storage
         clusters = list_clusters(
             self.apiclient,
             zoneid=self.zone.id
@@ -280,6 +316,9 @@ def test_02_list_snapshots_with_removed_data_store(self):
                                      zoneid=self.zone.id,
                                      podid=self.pod.id
                                      )
+        self.cleanup.append(self.virtual_machine_with_disk)
+        self.cleanup.append(storage)
+
         self.assertEqual(
             storage.state,
             'Up',
@@ -314,22 +353,26 @@ def test_02_list_snapshots_with_removed_data_store(self):
             "Check storage pool type "
         )
 
-        # 2) Migrate VM ROOT volume to new Primary Storage
-        volumes = list_volumes(
+        # Attach created volume to vm, then detach it to be able to migrate it
+        self.virtual_machine_with_disk.stop(self.apiclient)
+        self.virtual_machine_with_disk.attach_volume(
             self.apiclient,
-            virtualmachineid=self.virtual_machine_with_disk.id,
-            type='ROOT',
-            listall=True
+            vol
         )
+        self.virtual_machine_with_disk.detach_volume(
+            self.apiclient,
+            vol
+        )
+
+        # Migrate volume to new Primary Storage
         Volume.migrate(self.apiclient,
-                       storageid=storage.id,
-                       volumeid=volumes[0].id,
-                       livemigrate="true"
-                       )
+                    storageid=storage.id,
+                    volumeid=vol.id
+                    )
         volume_response = list_volumes(
             self.apiclient,
-            id=volumes[0].id,
+            id=vol.id,
         )
         self.assertNotEqual(
             len(volume_response),
             0,
@@ -342,22 +385,21 @@ def test_02_list_snapshots_with_removed_data_store(self):
             volume_migrated.storageid,
             storage.id,
             "Check volume storage id"
         )
-        self.cleanup.append(self.virtual_machine_with_disk)
-        self.cleanup.append(storage)
-        # 3) Take snapshot of VM ROOT volume
+        # Take snapshot of new volume
         snapshot = Snapshot.create(
             self.apiclient,
             volume_migrated.id,
             account=self.account.name,
             domainid=self.account.domainid
         )
+        self.debug("Snapshot created: ID - %s" % snapshot.id)
 
-        # 4) Delete VM and created Primery Storage
+        # Delete volume, VM and created Primary Storage
        cleanup_resources(self.apiclient, self.cleanup)
 
-        # 5) List snapshot and verify it gets properly listed although Primary Storage was removed
+        # List snapshot and verify it gets properly listed although Primary Storage was removed
         snapshot_response = Snapshot.list(
             self.apiclient,
             id=snapshot.id
         )
@@ -373,10 +415,11 @@ def test_02_list_snapshots_with_removed_data_store(self):
             "Check snapshot id"
         )
 
-        # 6) Delete snapshot and verify it gets properly deleted (should not be listed)
+        # Delete snapshot and verify it gets properly deleted (should not be listed)
         self.cleanup = [snapshot]
         cleanup_resources(self.apiclient, self.cleanup)
+        self.cleanup = []
 
         snapshot_response_2 = Snapshot.list(
             self.apiclient,
             id=snapshot.id
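
Note on the underlying fix (a reviewer-style summary, not part of the patches above): the test removes the primary storage pool and then lists a snapshot that still references it. getPrimaryDataStore() previously looked the pool up with findById(), which in CloudStack's DAO layer treats soft-deleted rows (rows whose "removed" column is set) as absent, so the lookup hit the "Unable to locate datastore" exception once the pool was gone; findByIdIncludingRemoved() still resolves such rows. The sketch below is a minimal, self-contained model of that soft-delete lookup semantics; PoolRecord and PoolDao are hypothetical stand-ins, not CloudStack's real StoragePoolVO or its DAO.

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical record with a soft-delete flag, like the "removed" column.
    class PoolRecord {
        final long id;
        boolean removed;
        PoolRecord(long id) { this.id = id; }
    }

    class PoolDao {
        private final Map<Long, PoolRecord> rows = new HashMap<>();

        void persist(PoolRecord r) { rows.put(r.id, r); }

        // Soft delete: the row stays in the table, only the flag is set.
        void remove(long id) {
            PoolRecord r = rows.get(id);
            if (r != null) { r.removed = true; }
        }

        // Like GenericDao.findById(): a soft-deleted row looks absent.
        PoolRecord findById(long id) {
            PoolRecord r = rows.get(id);
            return (r == null || r.removed) ? null : r;
        }

        // Like findByIdIncludingRemoved(): soft-deleted rows still resolve.
        PoolRecord findByIdIncludingRemoved(long id) {
            return rows.get(id);
        }
    }

    public class RemovedStoreLookupDemo {
        public static void main(String[] args) {
            PoolDao dao = new PoolDao();
            dao.persist(new PoolRecord(42L));
            dao.remove(42L); // primary storage deleted after a snapshot was taken

            // Old behavior: null, i.e. the "Unable to locate datastore" path.
            System.out.println(dao.findById(42L));
            // Patched behavior: the pool still resolves, so the snapshot lists.
            System.out.println(dao.findByIdIncludingRemoved(42L) != null);
        }
    }

This mirrors why only the DAO call needed to change: the snapshot row itself was never deleted, it just pointed at a storage pool that findById() could no longer see.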