Merged
@@ -56,7 +56,7 @@ public void config() {
 
     @Override
     public PrimaryDataStore getPrimaryDataStore(long dataStoreId) {
-        StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId);
+        StoragePoolVO dataStoreVO = dataStoreDao.findByIdIncludingRemoved(dataStoreId);
        if (dataStoreVO == null) {
            throw new CloudRuntimeException("Unable to locate datastore with id " + dataStoreId);
        }
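This one-line change is the core of the fix: CloudStack's DAO layer soft-deletes rows (a removed timestamp is set instead of dropping the row), so findById filters removed pools out and getPrimaryDataStore throws the CloudRuntimeException above for snapshots whose pool has been deleted. A minimal sketch of the two lookup semantics, using a hypothetical in-memory stand-in rather than the real GenericDao:

```python
# Hypothetical in-memory stand-in for the soft-delete lookup semantics.
class StoragePoolDaoSketch:
    def __init__(self, rows):
        # rows: {pool_id: {"name": str, "removed": timestamp or None}}
        self.rows = rows

    def find_by_id(self, pool_id):
        # Mirrors findById: soft-deleted rows are invisible.
        row = self.rows.get(pool_id)
        return row if row and row["removed"] is None else None

    def find_by_id_including_removed(self, pool_id):
        # Mirrors findByIdIncludingRemoved: removed rows still resolve.
        return self.rows.get(pool_id)


dao = StoragePoolDaoSketch({42: {"name": "Primary XEN", "removed": "2018-06-01"}})
assert dao.find_by_id(42) is None                        # old code path: exception
assert dao.find_by_id_including_removed(42) is not None  # fixed path: snapshot still listable
```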
83 changes: 63 additions & 20 deletions test/integration/smoke/test_snapshots.py
@@ -19,14 +19,16 @@
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
 from marvin.lib.utils import (cleanup_resources,
-                              is_snapshot_on_nfs)
+                              is_snapshot_on_nfs,
+                              validateList)
 from marvin.lib.base import (VirtualMachine,
                              Account,
                              Template,
                              ServiceOffering,
                              Snapshot,
                              StoragePool,
-                             Volume)
+                             Volume,
+                             DiskOffering)
 from marvin.lib.common import (get_domain,
                                get_template,
                                get_zone,
@@ -36,6 +38,7 @@
                                list_storage_pools,
                                list_clusters)
 from marvin.lib.decoratorGenerators import skipTestIf
+from marvin.codes import PASS
 
 
 class Templates:
@@ -134,6 +137,10 @@ def setUpClass(cls):
             cls.apiclient,
             cls.services["service_offerings"]["tiny"]
         )
+        cls.disk_offering = DiskOffering.create(
+            cls.apiclient,
+            cls.services["disk_offering"]
+        )
         cls.virtual_machine = cls.virtual_machine_with_disk = \
             VirtualMachine.create(
                 cls.apiclient,
@@ -149,6 +156,7 @@ def setUpClass(cls):
         cls._cleanup.append(cls.service_offering)
         cls._cleanup.append(cls.account)
         cls._cleanup.append(cls.template)
+        cls._cleanup.append(cls.disk_offering)
         return
 
     @classmethod
@@ -267,19 +275,50 @@ def test_02_list_snapshots_with_removed_data_store(self):
         """Test listing volume snapshots with removed data stores
         """
 
-        # 1) Create new Primary Storage
+        # 1 - Create new volume -> V
+        # 2 - Create new Primary Storage -> PS
+        # 3 - Attach and detach volume V from vm
+        # 4 - Migrate volume V to PS
+        # 5 - Take volume V snapshot -> S
+        # 6 - List snapshot and verify it gets properly listed although Primary Storage was removed
 
+        # Create new volume
+        vol = Volume.create(
+            self.apiclient,
+            self.services["volume"],
+            diskofferingid=self.disk_offering.id,
+            zoneid=self.zone.id,
+            account=self.account.name,
+            domainid=self.account.domainid,
+        )
+        self.cleanup.append(vol)
+        self.assertIsNotNone(vol, "Failed to create volume")
+        vol_res = Volume.list(
+            self.apiclient,
+            id=vol.id
+        )
+        self.assertEqual(
+            validateList(vol_res)[0],
+            PASS,
+            "Invalid response returned for list volumes")
+        vol_uuid = vol_res[0].id
 
+        # Create new Primary Storage
         clusters = list_clusters(
             self.apiclient,
             zoneid=self.zone.id
         )
         assert isinstance(clusters,list) and len(clusters)>0
 
         storage = StoragePool.create(self.apiclient,
Contributor: I think it's good to append this storage to cleanup as soon as it's created. I've hit the exact issue that would leave it behind in CloudStack: a test failure before the point where it is appended meant cleanup() didn't wipe it.

Contributor Author: Done, thanks for pointing it out!
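The reviewer's point is worth spelling out: cleanup_resources() can only tear down what is already in self.cleanup, so a resource must be registered the moment it exists, before any assertion that might abort the test. A self-contained illustration of the failure mode (toy stand-ins, not the PR's code):

```python
# Minimal illustration of why the append must precede any assertion:
# if creation succeeds but a later check fails, teardown must still
# know about the resource.
cleanup = []

def create_storage_pool():
    # Stand-in for StoragePool.create(...); pretend the pool came up "Down".
    return {"name": "Primary XEN 2", "state": "Down"}

pool = create_storage_pool()
cleanup.append(pool)  # registered immediately, so it is always torn down

try:
    assert pool["state"] == "Up", "Check storage pool state"
except AssertionError:
    pass  # the test would abort here ...

print("to clean up:", [p["name"] for p in cleanup])  # ... yet the pool is still tracked
```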

self.services["nfs"],
self.services["nfs2"],
clusterid=clusters[0].id,
zoneid=self.zone.id,
podid=self.pod.id
)
self.cleanup.append(self.virtual_machine_with_disk)
self.cleanup.append(storage)

self.assertEqual(
storage.state,
'Up',
@@ -314,22 +353,26 @@ def test_02_list_snapshots_with_removed_data_store(self):
             "Check storage pool type "
         )
 
-        # 2) Migrate VM ROOT volume to new Primary Storage
-        volumes = list_volumes(
+        # Attach created volume to vm, then detach it to be able to migrate it
+        self.virtual_machine_with_disk.stop(self.apiclient)
+        self.virtual_machine_with_disk.attach_volume(
             self.apiclient,
-            virtualmachineid=self.virtual_machine_with_disk.id,
-            type='ROOT',
-            listall=True
+            vol
         )
+        self.virtual_machine_with_disk.detach_volume(
+            self.apiclient,
+            vol
+        )
 
+        # Migrate volume to new Primary Storage
         Volume.migrate(self.apiclient,
-            storageid=storage.id,
-            volumeid=volumes[0].id,
-            livemigrate="true"
-        )
+                       storageid=storage.id,
+                       volumeid=vol.id
+                       )
 
         volume_response = list_volumes(
             self.apiclient,
-            id=volumes[0].id,
+            id=vol.id,
         )
         self.assertNotEqual(
             len(volume_response),
@@ -342,22 +385,21 @@
             storage.id,
             "Check volume storage id"
         )
-        self.cleanup.append(self.virtual_machine_with_disk)
-        self.cleanup.append(storage)
 
-        # 3) Take snapshot of VM ROOT volume
+        # Take snapshot of new volume
         snapshot = Snapshot.create(
             self.apiclient,
             volume_migrated.id,
             account=self.account.name,
             domainid=self.account.domainid
         )
 
         self.debug("Snapshot created: ID - %s" % snapshot.id)
 
-        # 4) Delete VM and created Primery Storage
+        # Delete volume, VM and created Primary Storage
         cleanup_resources(self.apiclient, self.cleanup)
 
-        # 5) List snapshot and verify it gets properly listed although Primary Storage was removed
+        # List snapshot and verify it gets properly listed although Primary Storage was removed
         snapshot_response = Snapshot.list(
             self.apiclient,
             id=snapshot.id
@@ -373,10 +415,11 @@
             )
             "Check snapshot id"
         )
-        # 6) Delete snapshot and verify it gets properly deleted (should not be listed)
+        # Delete snapshot and verify it gets properly deleted (should not be listed)
         self.cleanup = [snapshot]
         cleanup_resources(self.apiclient, self.cleanup)
 
+        self.cleanup = []
         snapshot_response_2 = Snapshot.list(
             self.apiclient,
             id=snapshot.id
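The diff is truncated before the final assertion, but the comment states the intent: after deletion, listing by the snapshot id should return nothing. A hedged sketch of what that closing check could look like (the PR's actual assertion is not visible here):

```python
# Hypothetical closing assertion: after deletion, listing by the old id
# should come back empty (Marvin returns None when nothing matches).
snapshot_response_2 = Snapshot.list(
    self.apiclient,
    id=snapshot.id
)
self.assertIsNone(
    snapshot_response_2,
    "Snapshot was still listed after deletion"
)
```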
4 changes: 4 additions & 0 deletions tools/marvin/marvin/config/test_data.py
@@ -962,6 +962,10 @@
"url": "nfs://nfs/export/automation/1/testprimary",
"name": "Primary XEN"
},
"nfs2": {
"url": "nfs://nfs/export/automation/1/testprimary2",
"name": "Primary XEN 2"
},
"iscsi": {
"url":
"iscsi://192.168.100.21/iqn.2012-01.localdomain.clo-cstack-cos6:iser/1",
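For context on how the new entry reaches the test: Marvin parses test_data.py into the dict returned by getParsedTestDataConfig(), so the block above surfaces as self.services["nfs2"], which test_02 hands to StoragePool.create. A rough sketch of that plumbing, assuming the standard smoke-test bootstrap (the class name here is illustrative):

```python
from marvin.cloudstackTestCase import cloudstackTestCase

class TestDataPlumbingSketch(cloudstackTestCase):
    @classmethod
    def setUpClass(cls):
        cls.testClient = super(TestDataPlumbingSketch, cls).getClsTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        # getParsedTestDataConfig() loads tools/marvin/marvin/config/test_data.py,
        # so the new block is addressable by its key:
        cls.services = cls.testClient.getParsedTestDataConfig()
        assert cls.services["nfs2"]["name"] == "Primary XEN 2"
```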