diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2f9bc4b4af1b..13e63c8f5651 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,14 +14,14 @@ Bug fixes It's very important that we can easily track bug fix commits, so their hashes should remain the same in all branches. Therefore, a pull request (PR) that fixes a bug, should be sent against a release branch. This can be either the "current release" or the "previous release", depending on which ones are maintained. -Since the goal is a stable master, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> master (in other words: old to new) +Since the goal is a stable main, bug fixes should be "merged forward" to the next branch in order: "previous release" -> "current release" -> main (in other words: old to new) Developing new features ----------------------- -Development should be done in a feature branch, branched off of master. -Send a PR(steps below) to get it into master (2x LGTM applies). -PR will only be merged when master is open, will be held otherwise until master is open again. +Development should be done in a feature branch, branched off of main. +Send a PR(steps below) to get it into main (2x LGTM applies). +PR will only be merged when main is open, will be held otherwise until main is open again. No back porting / cherry-picking features to existing branches! 
PendingReleaseNotes file @@ -46,9 +46,9 @@ On your computer, follow these steps to setup a local repository for working on $ git clone https://github.com/YOUR_ACCOUNT/cloudstack.git $ cd cloudstack $ git remote add upstream https://github.com/apache/cloudstack.git -$ git checkout master +$ git checkout main $ git fetch upstream -$ git rebase upstream/master +$ git rebase upstream/main ``` @@ -56,7 +56,7 @@ Making changes -------------- -It is important that you create a new branch to make changes on and that you do not change the `master` branch (other than to rebase in changes from `upstream/master`). In this example I will assume you will be making your changes to a branch called `feature_x`. This `feature_x` branch will be created on your local repository and will be pushed to your forked repository on GitHub. Once this branch is on your fork you will create a Pull Request for the changes to be added to the ACS project. +It is important that you create a new branch to make changes on and that you do not change the `main` branch (other than to rebase in changes from `upstream/main`). In this example I will assume you will be making your changes to a branch called `feature_x`. This `feature_x` branch will be created on your local repository and will be pushed to your forked repository on GitHub. Once this branch is on your fork you will create a Pull Request for the changes to be added to the ACS project. It is best practice to create a new branch each time you want to contribute to the project and only track the changes for that pull request in this branch. @@ -71,26 +71,26 @@ $ git commit -a -m "descriptive commit message for your changes" > The `-b` specifies that you want to create a new branch called `feature_x`. You only specify `-b` the first time you checkout because you are creating a new branch. Once the `feature_x` branch exists, you can later switch to it with only `git checkout feature_x`. 
-Rebase `feature_x` to include updates from `upstream/master` +Rebase `feature_x` to include updates from `upstream/main` ------------------------------------------------------------ -It is important that you maintain an up-to-date `master` branch in your local repository. This is done by rebasing in the code changes from `upstream/master` (the official ACS project repository) into your local repository. You will want to do this before you start working on a feature as well as right before you submit your changes as a pull request. I recommend you do this process periodically while you work to make sure you are working off the most recent project code. +It is important that you maintain an up-to-date `main` branch in your local repository. This is done by rebasing in the code changes from `upstream/main` (the official ACS project repository) into your local repository. You will want to do this before you start working on a feature as well as right before you submit your changes as a pull request. I recommend you do this process periodically while you work to make sure you are working off the most recent project code. This process will do the following: -1. Checkout your local `master` branch -2. Synchronize your local `master` branch with the `upstream/master` so you have all the latest changes from the project +1. Checkout your local `main` branch +2. Synchronize your local `main` branch with the `upstream/main` so you have all the latest changes from the project 3. Rebase the latest project code into your `feature_x` branch so it is up-to-date with the upstream code ``` bash -$ git checkout master +$ git checkout main $ git fetch upstream -$ git rebase upstream/master +$ git rebase upstream/main $ git checkout feature_x -$ git rebase master +$ git rebase main ``` -> Now your `feature_x` branch is up-to-date with all the code in `upstream/master`. +> Now your `feature_x` branch is up-to-date with all the code in `upstream/main`. 
Make a GitHub Pull Request to contribute your changes @@ -100,10 +100,10 @@ When you are happy with your changes and you are ready to contribute them, you w Please include JIRA id, detailed information about the bug/feature, what all tests are executed, how the reviewer can test this feature etc. Incase of UI PRs, a screenshot is preferred. -> **IMPORTANT:** Make sure you have rebased your `feature_x` branch to include the latest code from `upstream/master` _before_ you do this. +> **IMPORTANT:** Make sure you have rebased your `feature_x` branch to include the latest code from `upstream/main` _before_ you do this. ``` bash -$ git push origin master +$ git push origin main $ git push origin feature_x ``` @@ -113,7 +113,7 @@ To initiate the pull request, do the following: 1. In your browser, navigate to your forked repository: [https://github.com/YOUR_ACCOUNT/cloudstack](https://github.com/YOUR_ACCOUNT/cloudstack) 2. Click the new button called '**Compare & pull request**' that showed up just above the main area in your forked repository -3. Validate the pull request will be into the upstream `master` and will be from your `feature_x` branch +3. Validate the pull request will be into the upstream `main` and will be from your `feature_x` branch 4. Enter a detailed description of the work you have done and then click '**Send pull request**' If you are requested to make modifications to your proposed changes, make the changes locally on your `feature_x` branch, re-push the `feature_x` branch to your fork. The existing pull request should automatically pick up the change and update accordingly. @@ -122,14 +122,14 @@ If you are requested to make modifications to your proposed changes, make the ch Cleaning up after a successful pull request ------------------------------------------- -Once the `feature_x` branch has been committed into the `upstream/master` branch, your local `feature_x` branch and the `origin/feature_x` branch are no longer needed. 
If you want to make additional changes, restart the process with a new branch. +Once the `feature_x` branch has been committed into the `upstream/main` branch, your local `feature_x` branch and the `origin/feature_x` branch are no longer needed. If you want to make additional changes, restart the process with a new branch. -> **IMPORTANT:** Make sure that your changes are in `upstream/master` before you delete your `feature_x` and `origin/feature_x` branches! +> **IMPORTANT:** Make sure that your changes are in `upstream/main` before you delete your `feature_x` and `origin/feature_x` branches! You can delete these deprecated branches with the following: ``` bash -$ git checkout master +$ git checkout main $ git branch -D feature_x $ git push origin :feature_x ``` diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 216273211420..85c1ac9ec6a9 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,6 +1,6 @@ @@ -23,7 +23,7 @@ Categorize the issue, e.g. API, VR, VPN, UI, etc. ##### CLOUDSTACK VERSION ~~~ diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md index 858f100d01be..16345c1d723a 100644 --- a/PULL_REQUEST_TEMPLATE.md +++ b/PULL_REQUEST_TEMPLATE.md @@ -48,4 +48,4 @@ This PR... 
- + diff --git a/README.md b/README.md index e2bb618f0911..3d9bc3d4fe36 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Apache CloudStack [![Build Status](https://travis-ci.org/apache/cloudstack.svg?branch=master)](https://travis-ci.org/apache/cloudstack) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apachecloudstack) [![Lines of Code](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=ncloc)](https://sonarcloud.io/dashboard?id=apachecloudstack) ![GitHub language count](https://img.shields.io/github/languages/count/apache/cloudstack.svg) ![GitHub top language](https://img.shields.io/github/languages/top/apache/cloudstack.svg) +# Apache CloudStack [![Build Status](https://travis-ci.org/apache/cloudstack.svg?branch=main)](https://travis-ci.org/apache/cloudstack) [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=alert_status)](https://sonarcloud.io/dashboard?id=apachecloudstack) [![Lines of Code](https://sonarcloud.io/api/project_badges/measure?project=apachecloudstack&metric=ncloc)](https://sonarcloud.io/dashboard?id=apachecloudstack) ![GitHub language count](https://img.shields.io/github/languages/count/apache/cloudstack.svg) ![GitHub top language](https://img.shields.io/github/languages/top/apache/cloudstack.svg) ![Apache CloudStack](tools/logo/apache_cloudstack.png) diff --git a/api/src/main/java/com/cloud/network/router/VirtualRouter.java b/api/src/main/java/com/cloud/network/router/VirtualRouter.java index 84c85ce66758..8bec51990470 100644 --- a/api/src/main/java/com/cloud/network/router/VirtualRouter.java +++ b/api/src/main/java/com/cloud/network/router/VirtualRouter.java @@ -35,7 +35,7 @@ public enum UpdateState { boolean getIsRedundantRouter(); public enum RedundantState { - UNKNOWN, MASTER, BACKUP, FAULT + UNKNOWN, PRIMARY, BACKUP, FAULT } RedundantState 
getRedundantState(); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 605a6c006ebd..70095841ac63 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -824,6 +824,7 @@ public class ApiConstants { public static final String KUBERNETES_VERSION_ID = "kubernetesversionid"; public static final String KUBERNETES_VERSION_NAME = "kubernetesversionname"; public static final String MASTER_NODES = "masternodes"; + public static final String CONTROL_NODES = "controlnodes"; public static final String MIN_SEMANTIC_VERSION = "minimumsemanticversion"; public static final String MIN_KUBERNETES_VERSION_ID = "minimumkubernetesversionid"; public static final String NODE_ROOT_DISK_SIZE = "noderootdisksize"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java index 68c09c27fb03..a3d84f837fbd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java @@ -54,8 +54,8 @@ public class ListResourceLimitsCmd extends BaseListProjectAndAccountResourcesCmd + "5 - Project. Number of projects an account can own. " + "6 - Network. Number of networks an account can own. " + "7 - VPC. Number of VPC an account can own. " - + "8 - CPU. Number of CPU an account can allocate for his resources. " - + "9 - Memory. Amount of RAM an account can allocate for his resources. " + + "8 - CPU. Number of CPU an account can allocate for their resources. " + + "9 - Memory. Amount of RAM an account can allocate for their resources. " + "10 - PrimaryStorage. Total primary storage space (in GiB) a user can use. " + "11 - SecondaryStorage. 
Total secondary storage space (in GiB) a user can use. ") private Integer resourceType; @@ -69,8 +69,8 @@ public class ListResourceLimitsCmd extends BaseListProjectAndAccountResourcesCmd + "project - Project. Number of projects an account can own. " + "network - Network. Number of networks an account can own. " + "vpc - VPC. Number of VPC an account can own. " - + "cpu - CPU. Number of CPU an account can allocate for his resources. " - + "memory - Memory. Amount of RAM an account can allocate for his resources. " + + "cpu - CPU. Number of CPU an account can allocate for their resources. " + + "memory - Memory. Amount of RAM an account can allocate for their resources. " + "primary_storage - PrimaryStorage. Total primary storage space (in GiB) a user can use. " + "secondary_storage - SecondaryStorage. Total secondary storage space (in GiB) a user can use. ") private String resourceTypeName; diff --git a/api/src/main/java/org/apache/cloudstack/query/QueryService.java b/api/src/main/java/org/apache/cloudstack/query/QueryService.java index 57ac963bb8b1..3484de84ef49 100644 --- a/api/src/main/java/org/apache/cloudstack/query/QueryService.java +++ b/api/src/main/java/org/apache/cloudstack/query/QueryService.java @@ -92,8 +92,8 @@ public interface QueryService { ConfigKey AllowUserViewDestroyedVM = new ConfigKey<>("Advanced", Boolean.class, "allow.user.view.destroyed.vm", "false", "Determines whether users can view their destroyed or expunging vm ", true, ConfigKey.Scope.Account); - static final ConfigKey UserVMBlacklistedDetails = new ConfigKey("Advanced", String.class, - "user.vm.blacklisted.details", "rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag", + static final ConfigKey UserVMDeniedDetails = new ConfigKey("Advanced", String.class, + "user.vm.denied.details", "rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag", "Determines whether users can view certain VM settings. 
When set to empty, default value used is: rootdisksize, cpuOvercommitRatio, memoryOvercommitRatio, Message.ReservedCapacityFreed.Flag.", true); static final ConfigKey UserVMReadOnlyDetails = new ConfigKey("Advanced", String.class, diff --git a/client/conf/db.properties.in b/client/conf/db.properties.in index f94631c356a6..5ea63e43de26 100644 --- a/client/conf/db.properties.in +++ b/client/conf/db.properties.in @@ -83,21 +83,21 @@ db.simulator.autoReconnect=true db.ha.enabled=false db.ha.loadBalanceStrategy=com.cloud.utils.db.StaticStrategy # cloud stack Database -db.cloud.slaves=localhost,localhost +db.cloud.replicas=localhost,localhost db.cloud.autoReconnect=true db.cloud.failOverReadOnly=false db.cloud.reconnectAtTxEnd=true db.cloud.autoReconnectForPools=true -db.cloud.secondsBeforeRetryMaster=3600 -db.cloud.queriesBeforeRetryMaster=5000 +db.cloud.secondsBeforeRetrySource=3600 +db.cloud.queriesBeforeRetrySource=5000 db.cloud.initialTimeout=3600 #usage Database -db.usage.slaves=localhost,localhost +db.usage.replicas=localhost,localhost db.usage.autoReconnect=true db.usage.failOverReadOnly=false db.usage.reconnectAtTxEnd=true db.usage.autoReconnectForPools=true -db.usage.secondsBeforeRetryMaster=3600 -db.usage.queriesBeforeRetryMaster=5000 +db.usage.secondsBeforeRetrySource=3600 +db.usage.queriesBeforeRetrySource=5000 db.usage.initialTimeout=3600 diff --git a/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java b/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java index 6a95ab11bc8e..7f8626bf023a 100644 --- a/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java +++ b/core/src/main/java/com/cloud/agent/api/CheckRouterAnswer.java @@ -48,8 +48,8 @@ protected boolean parseDetails(final String details) { state = RedundantState.UNKNOWN; return false; } - if (details.startsWith("Status: MASTER")) { - state = RedundantState.MASTER; + if (details.startsWith("Status: PRIMARY")) { + state = RedundantState.PRIMARY; } else if 
(details.startsWith("Status: BACKUP")) { state = RedundantState.BACKUP; } else if (details.startsWith("Status: FAULT")) { diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql index eec9bcd671b3..d5058c1b358c 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41510to41600.sql @@ -303,3 +303,14 @@ from -- Update name for global configuration user.vm.readonly.ui.details Update configuration set name='user.vm.readonly.details' where name='user.vm.readonly.ui.details'; + +-- Update name for global configuration 'user.vm.blacklisted.details' to 'user.vm.denied.details' +UPDATE `cloud`.`configuration` SET name='user.vm.denied.details' WHERE name='user.vm.blacklisted.details'; + +-- Update name for global configuration 'blacklisted.routes' to 'denied.routes' +UPDATE `cloud`.`configuration` SET name='denied.routes', description='Routes that are denied, can not be used for Static Routes creation for the VPC Private Gateway' WHERE name='blacklisted.routes'; + +-- Rename 'master_node_count' to 'control_node_count' in kubernetes_cluster table +ALTER TABLE `cloud`.`kubernetes_cluster` CHANGE master_node_count control_node_count bigint NOT NULL default '0' COMMENT 'the number of the control nodes deployed for this Kubernetes cluster'; + +UPDATE `cloud`.`domain_router` SET redundant_state = 'PRIMARY' WHERE redundant_state = 'MASTER'; \ No newline at end of file diff --git a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java index 29acf94b545d..6256452e72aa 100644 --- a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java +++ 
b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/EndpointSelectorTest.java @@ -72,7 +72,7 @@ import com.cloud.org.Cluster; import com.cloud.org.Managed; import com.cloud.resource.ResourceState; -import com.cloud.server.LockMasterListener; +import com.cloud.server.LockControllerListener; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; @@ -120,12 +120,12 @@ public class EndpointSelectorTest { @Inject AccountManager accountManager; - LockMasterListener lockMasterListener; + LockControllerListener lockControllerListener; VolumeInfo vol = null; FakePrimaryDataStoreDriver driver = new FakePrimaryDataStoreDriver(); @Inject MockStorageMotionStrategy mockStorageMotionStrategy; - Merovingian2 _lockMaster; + Merovingian2 _lockController; @Inject DataStoreManager dataStoreManager; @Inject @@ -187,12 +187,12 @@ public void setUp() { when(accountManager.getSystemAccount()).thenReturn(account); when(accountManager.getSystemUser()).thenReturn(user); - if (Merovingian2.getLockMaster() == null) { - _lockMaster = Merovingian2.createLockMaster(1234); + if (Merovingian2.getLockController() == null) { + _lockController = Merovingian2.createLockController(1234); } else { - _lockMaster = Merovingian2.getLockMaster(); + _lockController = Merovingian2.getLockController(); } - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); ComponentContext.initComponentsLifeCycle(); } diff --git a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java index a3961ace64bb..152c279547c6 100644 --- a/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java +++ b/engine/storage/integration-test/src/test/java/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java @@ 
-73,7 +73,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.org.Cluster; import com.cloud.org.Managed; -import com.cloud.server.LockMasterListener; +import com.cloud.server.LockControllerListener; import com.cloud.storage.CreateSnapshotPayload; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; @@ -134,12 +134,12 @@ public class SnapshotTestWithFakeData { ImageStoreVO imageStore; @Inject AccountManager accountManager; - LockMasterListener lockMasterListener; + LockControllerListener lockControllerListener; VolumeInfo vol = null; FakePrimaryDataStoreDriver driver = new FakePrimaryDataStoreDriver(); @Inject MockStorageMotionStrategy mockStorageMotionStrategy; - Merovingian2 _lockMaster; + Merovingian2 _lockController; @Inject SnapshotPolicyDao snapshotPolicyDao; @@ -189,18 +189,18 @@ public void setUp() { when(accountManager.getSystemAccount()).thenReturn(account); when(accountManager.getSystemUser()).thenReturn(user); - if (Merovingian2.getLockMaster() == null) { - _lockMaster = Merovingian2.createLockMaster(1234); + if (Merovingian2.getLockController() == null) { + _lockController = Merovingian2.createLockController(1234); } else { - _lockMaster = Merovingian2.getLockMaster(); + _lockController = Merovingian2.getLockController(); } - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); ComponentContext.initComponentsLifeCycle(); } @After public void tearDown() throws Exception { - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); } private SnapshotVO createSnapshotInDb() { diff --git a/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java b/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java index d2537e369bd6..956430f5c8bd 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java +++ b/framework/db/src/main/java/com/cloud/utils/db/Merovingian2.java @@ -68,7 +68,7 @@ private Merovingian2(long msId) { conn = 
TransactionLegacy.getStandaloneConnectionWithException(); conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED); conn.setAutoCommit(true); - _concierge = new ConnectionConcierge("LockMaster", conn, true); + _concierge = new ConnectionConcierge("LockController", conn, true); } catch (SQLException e) { s_logger.error("Unable to get a new db connection", e); throw new CloudRuntimeException("Unable to initialize a connection to the database for locking purposes", e); @@ -83,8 +83,8 @@ private Merovingian2(long msId) { } } - public static synchronized Merovingian2 createLockMaster(long msId) { - assert s_instance == null : "No lock can serve two masters. Either he will hate the one and love the other, or he will be devoted to the one and despise the other."; + public static synchronized Merovingian2 createLockController(long msId) { + assert s_instance == null : "No lock can serve two controllers. Either we will hate the one and love the other, or we will be devoted to the one and despise the other."; s_instance = new Merovingian2(msId); s_instance.cleanupThisServer(); try { @@ -95,7 +95,7 @@ public static synchronized Merovingian2 createLockMaster(long msId) { return s_instance; } - public static Merovingian2 getLockMaster() { + public static Merovingian2 getLockController() { return s_instance; } diff --git a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java index 2dde30275a52..eb6b09c31f3a 100644 --- a/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java +++ b/framework/db/src/main/java/com/cloud/utils/db/TransactionLegacy.java @@ -377,19 +377,19 @@ protected void mark(final String name) { } public boolean lock(final String name, final int timeoutSeconds) { - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster == null) { + Merovingian2 lockController = Merovingian2.getLockController(); + if (lockController == null) { throw new 
CloudRuntimeException("There's no support for locking yet"); } - return lockMaster.acquire(name, timeoutSeconds); + return lockController.acquire(name, timeoutSeconds); } public boolean release(final String name) { - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster == null) { + Merovingian2 lockController = Merovingian2.getLockController(); + if (lockController == null) { throw new CloudRuntimeException("There's no support for locking yet"); } - return lockMaster.release(name); + return lockController.release(name); } /** @@ -644,9 +644,9 @@ public void cleanup() { closeConnection(); _stack.clear(); - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster != null) { - lockMaster.cleanupThread(); + Merovingian2 lockController = Merovingian2.getLockController(); + if (lockController != null) { + lockController.cleanupThread(); } } @@ -1063,11 +1063,11 @@ public static void initDataSource(Properties dbProps) { final String url = dbProps.getProperty("db.cloud.url.params"); String cloudDbHAParams = null; - String cloudSlaves = null; + String cloudReplicas = null; if (s_dbHAEnabled) { cloudDbHAParams = getDBHAParams("cloud", dbProps); - cloudSlaves = dbProps.getProperty("db.cloud.slaves"); - s_logger.info("The slaves configured for Cloud Data base is/are : " + cloudSlaves); + cloudReplicas = dbProps.getProperty("db.cloud.replicas"); + s_logger.info("The replicas configured for the Cloud database are: " + cloudReplicas); } final boolean useSSL = Boolean.parseBoolean(dbProps.getProperty("db.cloud.useSSL")); @@ -1078,7 +1078,7 @@ public static void initDataSource(Properties dbProps) { System.setProperty("javax.net.ssl.trustStorePassword", dbProps.getProperty("db.cloud.trustStorePassword")); } - final String cloudConnectionUri = cloudDriver + "://" + cloudHost + (s_dbHAEnabled ? "," + cloudSlaves : "") + ":" + cloudPort + "/" + cloudDbName + + final String cloudConnectionUri = cloudDriver + "://" + cloudHost + (s_dbHAEnabled ? 
"," + cloudReplicas : "") + ":" + cloudPort + "/" + cloudDbName + "?autoReconnect=" + cloudAutoReconnect + (url != null ? "&" + url : "") + (useSSL ? "&useSSL=true" : "") + (s_dbHAEnabled ? "&" + cloudDbHAParams : "") + (s_dbHAEnabled ? "&loadBalanceStrategy=" + loadBalanceStrategy : ""); DriverLoader.loadDriver(cloudDriver); @@ -1101,7 +1101,7 @@ public static void initDataSource(Properties dbProps) { final boolean usageAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.usage.autoReconnect")); final String usageUrl = dbProps.getProperty("db.usage.url.params"); - final String usageConnectionUri = usageDriver + "://" + usageHost + (s_dbHAEnabled ? "," + dbProps.getProperty("db.cloud.slaves") : "") + ":" + usagePort + + final String usageConnectionUri = usageDriver + "://" + usageHost + (s_dbHAEnabled ? "," + dbProps.getProperty("db.cloud.replicas") : "") + ":" + usagePort + "/" + usageDbName + "?autoReconnect=" + usageAutoReconnect + (usageUrl != null ? "&" + usageUrl : "") + (s_dbHAEnabled ? "&" + getDBHAParams("usage", dbProps) : "") + (s_dbHAEnabled ? "&loadBalanceStrategy=" + loadBalanceStrategy : ""); DriverLoader.loadDriver(usageDriver); @@ -1196,8 +1196,8 @@ private static String getDBHAParams(String dbName, Properties dbProps) { sb.append("failOverReadOnly=" + dbProps.getProperty("db." + dbName + ".failOverReadOnly")); sb.append("&").append("reconnectAtTxEnd=" + dbProps.getProperty("db." + dbName + ".reconnectAtTxEnd")); sb.append("&").append("autoReconnectForPools=" + dbProps.getProperty("db." + dbName + ".autoReconnectForPools")); - sb.append("&").append("secondsBeforeRetryMaster=" + dbProps.getProperty("db." + dbName + ".secondsBeforeRetryMaster")); - sb.append("&").append("queriesBeforeRetryMaster=" + dbProps.getProperty("db." + dbName + ".queriesBeforeRetryMaster")); + sb.append("&").append("secondsBeforeRetrySource=" + dbProps.getProperty("db." 
+ dbName + ".secondsBeforeRetrySource")); + sb.append("&").append("queriesBeforeRetrySource=" + dbProps.getProperty("db." + dbName + ".queriesBeforeRetrySource")); sb.append("&").append("initialTimeout=" + dbProps.getProperty("db." + dbName + ".initialTimeout")); return sb.toString(); } diff --git a/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java b/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java index 9d5cf1b84c82..eb8b96dc7472 100644 --- a/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java +++ b/framework/db/src/test/java/com/cloud/utils/db/Merovingian2Test.java @@ -26,53 +26,53 @@ public class Merovingian2Test extends TestCase { static final Logger s_logger = Logger.getLogger(Merovingian2Test.class); - Merovingian2 _lockMaster = Merovingian2.createLockMaster(1234); + Merovingian2 _lockController = Merovingian2.createLockController(1234); @Override @Before protected void setUp() throws Exception { - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); } @Override @After protected void tearDown() throws Exception { - _lockMaster.cleanupThisServer(); + _lockController.cleanupThisServer(); } @Test public void testLockAndRelease() { s_logger.info("Testing first acquire"); - boolean result = _lockMaster.acquire("first" + 1234, 5); + boolean result = _lockController.acquire("first" + 1234, 5); Assert.assertTrue(result); s_logger.info("Testing acquire of different lock"); - result = _lockMaster.acquire("second" + 1234, 5); + result = _lockController.acquire("second" + 1234, 5); Assert.assertTrue(result); s_logger.info("Testing reacquire of the same lock"); - result = _lockMaster.acquire("first" + 1234, 5); + result = _lockController.acquire("first" + 1234, 5); Assert.assertTrue(result); - int count = _lockMaster.owns("first" + 1234); + int count = _lockController.owns("first" + 1234); Assert.assertEquals(count, 2); - count = _lockMaster.owns("second" + 1234); + count = 
_lockController.owns("second" + 1234); Assert.assertEquals(count, 1); s_logger.info("Testing release of the first lock"); - result = _lockMaster.release("first" + 1234); + result = _lockController.release("first" + 1234); Assert.assertTrue(result); - count = _lockMaster.owns("first" + 1234); + count = _lockController.owns("first" + 1234); Assert.assertEquals(count, 1); s_logger.info("Testing release of the second lock"); - result = _lockMaster.release("second" + 1234); + result = _lockController.release("second" + 1234); Assert.assertTrue(result); - result = _lockMaster.release("first" + 1234); + result = _lockController.release("first" + 1234); Assert.assertTrue(result); } diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index 02cdf2a9df4c..f693bae8c33c 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -58,7 +58,7 @@ protected DynamicRoleBasedAPIAccessChecker() { } private void denyApiAccess(final String commandName) throws PermissionDeniedException { - throw new PermissionDeniedException("The API " + commandName + " is blacklisted for the account's role."); + throw new PermissionDeniedException("The API " + commandName + " is denied for the account's role."); } public boolean isDisabled() { diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 5a17bb993eb3..5648a96ea666 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ 
b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -55,7 +55,7 @@ protected ProjectRoleBasedApiAccessChecker() { } private void denyApiAccess(final String commandName) throws PermissionDeniedException { - throw new PermissionDeniedException("The API " + commandName + " is blacklisted for the user's/account's project role."); + throw new PermissionDeniedException("The API " + commandName + " is denied for the user's/account's project role."); } diff --git a/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java b/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java index 6b40ab4ddffe..7550483b2306 100644 --- a/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java +++ b/plugins/acl/static-role-based/src/main/java/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java @@ -90,7 +90,7 @@ public boolean checkAccess(User user, String commandName) throws PermissionDenie } if (commandNames.contains(commandName)) { - throw new PermissionDeniedException("The API is blacklisted. Role type=" + roleType.toString() + " is not allowed to request the api: " + commandName); + throw new PermissionDeniedException("The API is denied. 
Role type=" + roleType.toString() + " is not allowed to request the api: " + commandName); } else { throw new UnavailableCommandException("The API " + commandName + " does not exist or is not available for this account."); } diff --git a/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java b/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java index b3536523a06e..90a6dad16740 100644 --- a/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java +++ b/plugins/database/mysql-ha/src/main/java/com/cloud/utils/db/StaticStrategy.java @@ -44,21 +44,21 @@ public JdbcConnection pickConnection(InvocationHandler proxy, List confi SQLException ex = null; - List whiteList = new ArrayList(numHosts); - whiteList.addAll(configuredHosts); + List allowList = new ArrayList(numHosts); + allowList.addAll(configuredHosts); - Map blackList = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); + Map denylist = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); - whiteList.removeAll(blackList.keySet()); + allowList.removeAll(denylist.keySet()); - Map whiteListMap = this.getArrayIndexMap(whiteList); + Map allowListMap = this.getArrayIndexMap(allowList); for (int attempts = 0; attempts < numRetries;) { - if (whiteList.size() == 0) { + if (allowList.size() == 0) { throw SQLError.createSQLException("No hosts configured", null); } - String hostPortSpec = whiteList.get(0); //Always take the first host + String hostPortSpec = allowList.get(0); //Always take the first host ConnectionImpl conn = (ConnectionImpl) liveConnections.get(hostPortSpec); @@ -70,16 +70,16 @@ public JdbcConnection pickConnection(InvocationHandler proxy, List confi if (((LoadBalancedConnectionProxy) proxy).shouldExceptionTriggerFailover(sqlEx)) { - Integer whiteListIndex = whiteListMap.get(hostPortSpec); + Integer allowListIndex = allowListMap.get(hostPortSpec); // exclude this host from being picked again - if (whiteListIndex != null) 
{ - whiteList.remove(whiteListIndex.intValue()); - whiteListMap = this.getArrayIndexMap(whiteList); + if (allowListIndex != null) { + allowList.remove(allowListIndex.intValue()); + allowListMap = this.getArrayIndexMap(allowList); } ((LoadBalancedConnectionProxy) proxy).addToGlobalBlacklist(hostPortSpec); - if (whiteList.size() == 0) { + if (allowList.size() == 0) { attempts++; try { Thread.sleep(250); @@ -88,12 +88,12 @@ public JdbcConnection pickConnection(InvocationHandler proxy, List confi } // start fresh - whiteListMap = new HashMap(numHosts); - whiteList.addAll(configuredHosts); - blackList = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); + allowListMap = new HashMap(numHosts); + allowList.addAll(configuredHosts); + denylist = ((LoadBalancedConnectionProxy) proxy).getGlobalBlacklist(); - whiteList.removeAll(blackList.keySet()); - whiteListMap = this.getArrayIndexMap(whiteList); + allowList.removeAll(denylist.keySet()); + allowListMap = this.getArrayIndexMap(allowList); } continue; diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java index 648bf7fc39e4..a41555dadcc4 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/hypervisor/OvmResourceBase.java @@ -224,7 +224,7 @@ public boolean configure(String name, Map params) throws Configu _conn = new Connection(_ip, _agentUserName, _agentPassword); try { - OvmHost.registerAsMaster(_conn); + OvmHost.registerAsPrimary(_conn); OvmHost.registerAsVmServer(_conn); _bridges = OvmBridge.getAllBridges(_conn); } catch (XmlRpcException e) { @@ -398,11 +398,11 @@ protected ReadyAnswer execute(ReadyCommand cmd) { try { OvmHost.Details d = OvmHost.getDetails(_conn); //TODO: cleanup halted vm - if (d.masterIp.equalsIgnoreCase(_ip)) { + if (d.primaryIp.equalsIgnoreCase(_ip)) { return new 
ReadyAnswer(cmd); } else { - s_logger.debug("Master IP changes to " + d.masterIp + ", it should be " + _ip); - return new ReadyAnswer(cmd, "I am not the master server"); + s_logger.debug("Primary IP changes to " + d.primaryIp + ", it should be " + _ip); + return new ReadyAnswer(cmd, "I am not the primary server"); } } catch (XmlRpcException e) { s_logger.debug("XML RPC Exception" + e.getMessage(), e); diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java index ad1b2f6f2b72..664fea3de791 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/OvmHost.java @@ -26,7 +26,7 @@ public class OvmHost extends OvmObject { public static final String XEN = "xen"; public static class Details { - public String masterIp; + public String primaryIp; public Integer cpuNum; public Integer cpuSpeed; public Long totalMemory; @@ -42,9 +42,9 @@ public String toJson() { } } - public static void registerAsMaster(Connection c) throws XmlRpcException { + public static void registerAsPrimary(Connection c) throws XmlRpcException { Object[] params = {c.getIp(), c.getUserName(), c.getPassword(), c.getPort(), c.getIsSsl()}; - c.call("OvmHost.registerAsMaster", params, false); + c.call("OvmHost.registerAsPrimary", params, false); } public static void registerAsVmServer(Connection c) throws XmlRpcException { diff --git a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java index a8ab4f77eea5..cd1b14eeaa15 100644 --- a/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java +++ b/plugins/hypervisors/ovm/src/main/java/com/cloud/ovm/object/Test.java @@ -38,8 +38,6 @@ public static void main(final String[] args) { //pool.registerServer("192.168.105.155", Pool.ServerType.UTILITY); 
//pool.registerServer("192.168.105.155", Pool.ServerType.XEN); System.out.println("Is:" + pool.isServerRegistered()); - //String ip = pool.getMasterIp(); - //System.out.println("IP:" + ip); System.out.println(pool.getServerConfig()); System.out.println(pool.getServerXmInfo()); System.out.println(pool.getHostInfo()); @@ -89,8 +87,6 @@ public static void main(final String[] args) { /* This is not being used at the moment. * Coverity issue: 1012179 */ - //final String txt = - // "{\"MasterIp\": \"192.168.189.12\", \"dom0Memory\": 790626304, \"freeMemory\": 16378757120, \"totalMemory\": 17169383424, \"cpuNum\": 4, \"agentVersion\": \"2.3-38\", \"cpuSpeed\": 2261}"; //OvmHost.Details d = new GsonBuilder().create().fromJson(txt, OvmHost.Details.class); //OvmHost.Details d = Coder.fromJson(txt, OvmHost.Details.class); diff --git a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py index d929d814da59..25b8e5a4d285 100755 --- a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py +++ b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmFaultConstants.py @@ -41,7 +41,7 @@ def __init__(self, err, code): "OvmDispatch.InvaildFunction":OvmDispatcherStub+3, "OvmVm.reboot":OvmDispatcherStub+4, - "OvmHost.registerAsMaster":OvmHostErrCodeStub+1, + "OvmHost.registerAsPrimary":OvmHostErrCodeStub+1, "OvmHost.registerAsVmServer":OvmHostErrCodeStub+2, "OvmHost.ping":OvmHostErrCodeStub+3, "OvmHost.getDetails":OvmHostErrCodeStub+4, diff --git a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py index 3c61500630d7..de50e8b1bb66 100755 --- a/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py +++ b/plugins/hypervisors/ovm/src/main/scripts/vm/hypervisor/ovm/OvmHostModule.py @@ -95,9 +95,9 @@ def _getDomainIdByName(self, 
vmName): raise NoVmFoundException("No domain id for %s found"%vmName) @staticmethod - def registerAsMaster(hostname, username="oracle", password="password", port=8899, isSsl=False): + def registerAsPrimary(hostname, username="oracle", password="password", port=8899, isSsl=False): try: - logger.debug(OvmHost.registerAsMaster, "ip=%s, username=%s, password=%s, port=%s, isSsl=%s"%(hostname, username, password, port, isSsl)) + logger.debug(OvmHost.registerAsPrimary, "ip=%s, username=%s, password=%s, port=%s, isSsl=%s"%(hostname, username, password, port, isSsl)) exceptionIfNoSuccess(register_server(hostname, 'site', False, username, password, port, isSsl), "Register %s as site failed"%hostname) exceptionIfNoSuccess(register_server(hostname, 'utility', False, username, password, port, isSsl), @@ -106,8 +106,8 @@ def registerAsMaster(hostname, username="oracle", password="password", port=8899 return rs except Exception, e: errmsg = fmt_err_msg(e) - logger.error(OvmHost.registerAsMaster, errmsg) - raise XmlRpcFault(toErrCode(OvmHost, OvmHost.registerAsMaster), errmsg) + logger.error(OvmHost.registerAsPrimary, errmsg) + raise XmlRpcFault(toErrCode(OvmHost, OvmHost.registerAsPrimary), errmsg) @staticmethod def registerAsVmServer(hostname, username="oracle", password="password", port=8899, isSsl=False): diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java index 344f708e6ef8..0d108069388c 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Cluster.java @@ -91,7 +91,7 @@ public Boolean discoverCluster() throws Ovm3ResourceException { * update_clusterConfiguration, * argument: self - default: None argument: cluster_conf - default: None <( * ? 
cluster_conf can be a "dict" or a plain file: print - * master.update_clusterConfiguration( + * primary.update_clusterConfiguration( * "heartbeat:\n\tregion = 0004FB0000050000E70FBDDEB802208F\n\tcluster = ba9aaf00ae5e2d72\n\nnode:\n\tip_port = 7777\n\tip_address = 192.168.1.64\n\tnumber = 0\n\tname = ovm-1\n\tcluster = ba9aaf00ae5e2d72\n\nnode:\n\tip_port = 7777\n\tip_address = 192.168.1.65\n\tnumber = 1\n\tname = ovm-2\n\tcluster = ba9aaf00ae5e2d72\n\ncluster:\n\tnode_count = 2\n\theartbeat_mode = global\n\tname = ba9aaf00ae5e2d72\n" * ) */ diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java index b037dd73a4d3..c0c0f3fa6821 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Linux.java @@ -57,7 +57,7 @@ public class Linux extends OvmObject { * {OS_Major_Version=5, Statistic=20, Membership_State=Unowned, * OVM_Version=3.2.1-517, OS_Type=Linux, Hypervisor_Name=Xen, * CPU_Type=x86_64, Manager_Core_API_Version=3.2.1.516, - * Is_Current_Master=false, OS_Name=Oracle VM Server, + * Is_Primary=false, OS_Name=Oracle VM Server, * Server_Roles=xen,utility, Pool_Unique_Id=none, * Host_Kernel_Release=2.6.39-300.22.2.el5uek, OS_Minor_Version=7, * Agent_Version=3.2.1-183, Boot_Time=1392366638, RPM_Version=3.2.1-183, @@ -154,8 +154,8 @@ public String getServerRoles() throws Ovm3ResourceException{ return get("Server_Roles"); } - public boolean getIsMaster() throws Ovm3ResourceException { - return Boolean.parseBoolean(get("Is_Current_Master")); + public boolean getIsPrimary() throws Ovm3ResourceException { + return Boolean.parseBoolean(get("Is_Primary")); } public String getOvmVersion() throws Ovm3ResourceException { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java 
b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java index cf62993a2381..6306754185e8 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/objects/Pool.java @@ -42,7 +42,7 @@ public class Pool extends OvmObject { }; private List poolHosts = new ArrayList(); private final List poolRoles = new ArrayList(); - private String poolMasterVip; + private String poolPrimaryVip; private String poolAlias; private String poolId = null; @@ -50,8 +50,8 @@ public Pool(Connection c) { setClient(c); } - public String getPoolMasterVip() { - return poolMasterVip; + public String getPoolPrimaryVip() { + return poolPrimaryVip; } public String getPoolAlias() { @@ -115,7 +115,7 @@ public Boolean createServerPool(String alias, String id, String vip, /* * public Boolean updatePoolVirtualIp(String ip) throws * Ovm3ResourceException { Object x = callWrapper("update_pool_virtual_ip", - * ip); if (x == null) { poolMasterVip = ip; return true; } return false; } + * ip); if (x == null) { poolPrimaryVip = ip; return true; } return false; } */ public Boolean leaveServerPool(String uuid) throws Ovm3ResourceException{ @@ -199,7 +199,7 @@ public Boolean discoverServerPool() throws Ovm3ResourceException { String path = "//Discover_Server_Pool_Result/Server_Pool"; poolId = xmlToString(path + "/Unique_Id", xmlDocument); poolAlias = xmlToString(path + "/Pool_Alias", xmlDocument); - poolMasterVip = xmlToString(path + "/Master_Virtual_Ip", + poolPrimaryVip = xmlToString(path + "/Primary_Virtual_Ip", xmlDocument); poolHosts.addAll(xmlToList(path + "//Registered_IP", xmlDocument)); if (poolId == null) { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java index d3bf4f0b674b..e897ca5e5edc 100644 --- 
a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/Ovm3HypervisorResource.java @@ -308,7 +308,7 @@ public void setRunLevel(int level) { @Override public boolean configure(String name, Map params) throws ConfigurationException { LOGGER.debug("configure " + name + " with params: " + params); - /* check if we're master or not and if we can connect */ + /* check if we're primary or not and if we can connect */ try { configuration = new Ovm3Configuration(params); if (!configuration.getIsTest()) { @@ -320,7 +320,7 @@ public boolean configure(String name, Map params) throws Configu if (!configuration.getIsTest()) { hypervisorsupport.setupServer(configuration.getAgentSshKeyFileName()); } - hypervisorsupport.masterCheck(); + hypervisorsupport.primaryCheck(); } catch (Exception e) { throw new CloudRuntimeException("Base checks failed for " + configuration.getAgentHostname(), e); } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java index ba31236a7a80..9da760b97fb7 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3Configuration.java @@ -50,8 +50,8 @@ public class Ovm3Configuration { private Boolean agentOvsAgentSsl = false; private String agentSshKeyFile = "id_rsa.cloud"; private String agentOwnedByUuid = "d1a749d4295041fb99854f52ea4dea97"; - private Boolean agentIsMaster = false; - private Boolean agentHasMaster = false; + private Boolean agentIsPrimary = false; + private Boolean agentHasPrimary = false; private Boolean agentInOvm3Pool = false; private Boolean agentInOvm3Cluster = false; private String 
ovm3PoolVip = ""; @@ -266,20 +266,20 @@ public void setAgentOwnedByUuid(String agentOwnedByUuid) { this.agentOwnedByUuid = agentOwnedByUuid; } - public Boolean getAgentIsMaster() { - return agentIsMaster; + public Boolean getAgentIsPrimary() { + return agentIsPrimary; } - public void setAgentIsMaster(Boolean agentIsMaster) { - this.agentIsMaster = agentIsMaster; + public void setAgentIsPrimary(Boolean agentIsPrimary) { + this.agentIsPrimary = agentIsPrimary; } - public Boolean getAgentHasMaster() { - return agentHasMaster; + public Boolean getAgentHasPrimary() { + return agentHasPrimary; } - public void setAgentHasMaster(Boolean agentHasMaster) { - this.agentHasMaster = agentHasMaster; + public void setAgentHasPrimary(Boolean agentHasPrimary) { + this.agentHasPrimary = agentHasPrimary; } public Boolean getAgentInOvm3Pool() { diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java index 55a087d9746a..67a63d788f31 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupport.java @@ -246,8 +246,8 @@ public void fillHostInfo(StartupRoutingCommand cmd) { d.put("private.network.device", config.getAgentPrivateNetworkName()); d.put("guest.network.device", config.getAgentGuestNetworkName()); d.put("storage.network.device", config.getAgentStorageNetworkName()); - d.put("ismaster", config.getAgentIsMaster().toString()); - d.put("hasmaster", config.getAgentHasMaster().toString()); + d.put("isprimary", config.getAgentIsPrimary().toString()); + d.put("hasprimary", config.getAgentHasPrimary().toString()); cmd.setHostDetails(d); LOGGER.debug("Add an Ovm3 host " + config.getAgentHostname() + ":" + cmd.getHostDetails()); @@ -571,13 
+571,13 @@ public CheckHealthAnswer execute(CheckHealthCommand cmd) { } /** - * materCheck + * primaryCheck * * @return */ - public boolean masterCheck() { + public boolean primaryCheck() { if ("".equals(config.getOvm3PoolVip())) { - LOGGER.debug("No cluster vip, not checking for master"); + LOGGER.debug("No cluster vip, not checking for primary"); return false; } @@ -585,26 +585,26 @@ public boolean masterCheck() { CloudstackPlugin cSp = new CloudstackPlugin(c); if (cSp.dom0HasIp(config.getOvm3PoolVip())) { LOGGER.debug(config.getAgentHostname() - + " is a master, already has vip " + + " is a primary, already has vip " + config.getOvm3PoolVip()); - config.setAgentIsMaster(true); + config.setAgentIsPrimary(true); } else if (cSp.ping(config.getOvm3PoolVip())) { LOGGER.debug(config.getAgentHostname() - + " has a master, someone has vip " + + " has a primary, someone has vip " + config.getOvm3PoolVip()); - config.setAgentHasMaster(true); + config.setAgentHasPrimary(true); } else { LOGGER.debug(config.getAgentHostname() - + " becomes a master, no one has vip " + + " becomes a primary, no one has vip " + config.getOvm3PoolVip()); - config.setAgentIsMaster(true); + config.setAgentIsPrimary(true); } } catch (Ovm3ResourceException e) { LOGGER.debug(config.getAgentHostname() - + " can't reach master: " + e.getMessage()); - config.setAgentHasMaster(false); + + " can't reach primary: " + e.getMessage()); + config.setAgentHasPrimary(false); } - return config.getAgentIsMaster(); + return config.getAgentIsPrimary(); } /* Check if the host is in ready state for CS */ @@ -614,22 +614,22 @@ public ReadyAnswer execute(ReadyCommand cmd) { Pool pool = new Pool(c); /* only interesting when doing cluster */ - if (!host.getIsMaster() && config.getAgentInOvm3Cluster()) { - if (pool.getPoolMasterVip().equalsIgnoreCase(c.getIp())) { + if (!host.getIsPrimary() && config.getAgentInOvm3Cluster()) { + if (pool.getPoolPrimaryVip().equalsIgnoreCase(c.getIp())) { /* check pool state here */ return 
new ReadyAnswer(cmd); } else { - LOGGER.debug("Master IP changes to " - + pool.getPoolMasterVip() + ", it should be " + LOGGER.debug("Primary IP changes to " + + pool.getPoolPrimaryVip() + ", it should be " + c.getIp()); - return new ReadyAnswer(cmd, "I am not the master server"); + return new ReadyAnswer(cmd, "I am not the primary server"); } - } else if (host.getIsMaster()) { - LOGGER.debug("Master, not clustered " + } else if (host.getIsPrimary()) { + LOGGER.debug("Primary, not clustered " + config.getAgentHostname()); return new ReadyAnswer(cmd); } else { - LOGGER.debug("No master, not clustered " + LOGGER.debug("No primary, not clustered " + config.getAgentHostname()); return new ReadyAnswer(cmd); } diff --git a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java index 6f76eb981502..17ff7153fcf9 100644 --- a/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java +++ b/plugins/hypervisors/ovm3/src/main/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3StoragePool.java @@ -138,7 +138,7 @@ private void takeOwnership33x(Pool pool) throws ConfigurationException { * @throws ConfigurationException */ public boolean prepareForPool() throws ConfigurationException { - /* need single master uuid */ + /* need single primary uuid */ try { Linux host = new Linux(c); Pool pool = new Pool(c); @@ -201,7 +201,7 @@ private Boolean setupPool(StorageFilerTO cmd) throws Ovm3ResourceException { Pool poolHost = new Pool(c); PoolOCFS2 poolFs = new PoolOCFS2(c); - if (config.getAgentIsMaster()) { + if (config.getAgentIsPrimary()) { try { LOGGER.debug("Create poolfs on " + config.getAgentHostname() + " for repo " + primUuid); @@ -218,7 +218,7 @@ private Boolean setupPool(StorageFilerTO cmd) throws Ovm3ResourceException { } catch (Ovm3ResourceException e) { throw e; } - } else if 
(config.getAgentHasMaster()) { + } else if (config.getAgentHasPrimary()) { try { poolHost.joinServerPool(poolAlias, primUuid, config.getOvm3PoolVip(), poolSize + 1, @@ -262,15 +262,15 @@ private Boolean addMembers() throws Ovm3ResourceException { try { Connection m = new Connection(config.getOvm3PoolVip(), c.getPort(), c.getUserName(), c.getPassword()); - Pool poolMaster = new Pool(m); - if (poolMaster.isInAPool()) { - members.addAll(poolMaster.getPoolMemberList()); - if (!poolMaster.getPoolMemberList().contains(c.getIp()) + Pool poolPrimary = new Pool(m); + if (poolPrimary.isInAPool()) { + members.addAll(poolPrimary.getPoolMemberList()); + if (!poolPrimary.getPoolMemberList().contains(c.getIp()) && c.getIp().equals(config.getOvm3PoolVip())) { members.add(c.getIp()); } } else { - LOGGER.warn(c.getIp() + " noticed master " + LOGGER.warn(c.getIp() + " noticed primary " + config.getOvm3PoolVip() + " is not part of pool"); return false; } @@ -306,7 +306,7 @@ public Answer execute(DeleteStoragePoolCommand cmd) { try { Pool pool = new Pool(c); pool.leaveServerPool(cmd.getPool().getUuid()); - /* also connect to the master and update the pool list ? */ + /* also connect to the primary and update the pool list ? 
*/ } catch (Ovm3ResourceException e) { LOGGER.debug( "Delete storage pool on host " @@ -448,8 +448,8 @@ private void prepareSecondaryStorageStore(String storageUrl, GlobalLock lock = GlobalLock.getInternLock("prepare.systemvm"); try { /* double check */ - if (config.getAgentHasMaster() && config.getAgentInOvm3Pool()) { - LOGGER.debug("Skip systemvm iso copy, leave it to the master"); + if (config.getAgentHasPrimary() && config.getAgentInOvm3Pool()) { + LOGGER.debug("Skip systemvm iso copy, leave it to the primary"); return; } if (lock.lock(3600)) { diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java index 9eb60fe131b2..bb6a931b4640 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/LinuxTest.java @@ -71,8 +71,8 @@ public LinuxTest() { + "<Registered_IP>192.168.1.64</Registered_IP>" + "<Node_Number>1</Node_Number>" + "<Server_Roles>xen,utility</Server_Roles>" - + "<Is_Current_Master>true</Is_Current_Master>" - + "<Master_Virtual_Ip>192.168.1.230</Master_Virtual_Ip>" + + "<Is_Primary>true</Is_Primary>" + + "<Primary_Virtual_Ip>192.168.1.230</Primary_Virtual_Ip>" + "<Manager_Core_API_Version>3.2.1.516</Manager_Core_API_Version>" + "<Membership_State>Pooled</Membership_State>" + "<Cluster_State>Offline</Cluster_State>" diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java index f1c683570373..0f955f1e7d9c 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/objects/PoolTest.java @@ -46,9 +46,9 @@ public class PoolTest { + " " + ALIAS + "" - + " " + + " " + 
VIP - + "" + + "" + " " + " " + " " @@ -78,7 +78,7 @@ public void testDiscoverServerPool() throws Ovm3ResourceException { results.basicStringTest(pool.getPoolId(), UUID); results.basicStringTest(pool.getPoolId(), UUID); results.basicStringTest(pool.getPoolAlias(), ALIAS); - results.basicStringTest(pool.getPoolMasterVip(), VIP); + results.basicStringTest(pool.getPoolPrimaryVip(), VIP); results.basicBooleanTest(pool.getPoolMemberList().contains(IP)); results.basicBooleanTest(pool.getPoolMemberList().contains(IP2)); } diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java index 80ee54f4d6dc..2665975f5ab4 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3ConfigurationTest.java @@ -45,7 +45,7 @@ public class Ovm3ConfigurationTest { params.put("password", "unknown"); params.put("username", "root"); params.put("pool", "a9c1219d-817d-4242-b23e-2607801c79d5"); - params.put("ismaster", "false"); + params.put("isprimary", "false"); params.put("storage.network.device", "xenbr0"); params.put("Host.OS.Version", "5.7"); params.put("xenserver.nics.max", "7"); @@ -64,7 +64,7 @@ public class Ovm3ConfigurationTest { params.put("ip", "192.168.1.64"); params.put("guid", "19e5f1e7-22f4-3b6d-8d41-c82f89c65295"); params.put("ovm3vip", "192.168.1.230"); - params.put("hasmaster", "true"); + params.put("hasprimary", "true"); params.put("guest.network.device", "xenbr0"); params.put("cluster", "1"); params.put("xenserver.heartbeat.timeout", "120"); diff --git a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java 
b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java index b9a603150f02..c3614888ef3e 100644 --- a/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java +++ b/plugins/hypervisors/ovm3/src/test/java/com/cloud/hypervisor/ovm3/resources/helpers/Ovm3HypervisorSupportTest.java @@ -192,9 +192,9 @@ public void checkHealthTest() throws ConfigurationException { } @Test - public void masterCheckTest() throws ConfigurationException { + public void primaryCheckTest() throws ConfigurationException { con = prepare(); - // System.out.println(hypervisor.masterCheck()); + // System.out.println(hypervisor.primaryCheck()); } @Test diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_master.sh b/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_primary.sh similarity index 100% rename from plugins/hypervisors/ovm3/src/test/resources/scripts/clean_master.sh rename to plugins/hypervisors/ovm3/src/test/resources/scripts/clean_primary.sh diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_slave.sh b/plugins/hypervisors/ovm3/src/test/resources/scripts/clean_secondary.sh similarity index 100% rename from plugins/hypervisors/ovm3/src/test/resources/scripts/clean_slave.sh rename to plugins/hypervisors/ovm3/src/test/resources/scripts/clean_secondary.sh diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py index 6c398519dc35..bba41ab48e05 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/create_pool_cluster.py @@ -254,7 +254,7 @@ for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - 
mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, %s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py index 19ff71e60669..be2f8b18c9ba 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/info.py @@ -68,14 +68,14 @@ def is_it_up(host, port): print "host: %s:%s UP" % (host, port) return True -# hmm master actions don't apply to a slave -master="192.168.1.161" +# hmm primary actions don't apply to a secondary +primary="192.168.1.161" port=8899 user = "oracle" password = "test123" auth = "%s:%s" % (user, password) server = getCon(auth, 'localhost', port) -mserver = getCon(auth, master, port) +mserver = getCon(auth, primary, port) poolNode=True interface = "c0a80100" role='xen,utility' @@ -93,7 +93,7 @@ def is_it_up(host, port): for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, %s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py index 748023d01272..c8ab89e7a638 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/password.py @@ -42,7 +42,7 @@ def getCon(host, port): return server -# hmm master actions don't 
apply to a slave +# hmm primary actions don't apply to a secondary port = 8899 user = "oracle" password = "test123" diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py index 38861644b63e..f9a47dbf68c0 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/repo_pool.py @@ -44,14 +44,14 @@ def is_it_up(host, port): print "host: %s:%s UP" % (host, port) return True -# hmm master actions don't apply to a slave -master = "192.168.1.161" +# hmm primary actions don't apply to a secondary +primary = "192.168.1.161" port = 8899 user = "oracle" password = "*******" auth = "%s:%s" % (user, password) server = ServerProxy("http://%s:%s" % ("localhost", port)) -mserver = ServerProxy("http://%s@%s:%s" % (auth, master, port)) +mserver = ServerProxy("http://%s@%s:%s" % (auth, primary, port)) poolNode = True interface = "c0a80100" role = 'xen,utility' @@ -63,11 +63,11 @@ def is_it_up(host, port): print "setting up password" server.update_agent_password(user, password) -if (is_it_up(master, port)): - print "master seems to be up, slaving" +if (is_it_up(primary, port)): + print "primary seems to be up, will become secondary" xserver = mserver else: - print "no master yet, will become master" + print "no primary yet, will become primary" # other mechanism must be used to make interfaces equal... 
try: @@ -79,7 +79,7 @@ def is_it_up(host, port): poolfsuuid = poolid clusterid = "ba9aaf00ae5e2d72" mgr = "d1a749d4295041fb99854f52ea4dea97" - poolmvip = master + poolmvip = primary poolfsnfsbaseuuid = "6824e646-5908-48c9-ba44-bb1a8a778084" repoid = "6824e646590848c9ba44bb1a8a778084" @@ -114,7 +114,7 @@ def is_it_up(host, port): for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, %s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: @@ -127,7 +127,7 @@ def is_it_up(host, port): poolMembers.append(mip) except Error, v: - print "no master will become master, %s" % v + print "no primary will become primary, %s" % v if (pooled == False): # setup the repository diff --git a/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py b/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py index 33789a2faf7e..4ded83ef0053 100755 --- a/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py +++ b/plugins/hypervisors/ovm3/src/test/resources/scripts/simple_pool.py @@ -55,14 +55,14 @@ def get_ip_address(ifname): struct.pack('256s', ifname[:15]) )[20:24]) -# hmm master actions don't apply to a slave -master = "192.168.1.161" +# hmm primary actions don't apply to a secondary +primary = "192.168.1.161" port = 8899 passw = 'test123' user = 'oracle' auth = "%s:%s" % (user, passw) server = getCon(auth, "localhost", port) -mserver = getCon(auth, master, port) +mserver = getCon(auth, primary, port) try: mserver.echo("test") except AttributeError, v: @@ -81,7 +81,7 @@ def get_ip_address(ifname): poolalias = "Pool 0" clusterid = "ba9aaf00ae5e2d72" mgr = "d1a749d4295041fb99854f52ea4dea97" - 
poolmvip = master + poolmvip = primary # primary primuuid = "7718562d872f47a7b4548f9cac4ffa3a" @@ -119,7 +119,7 @@ def get_ip_address(ifname): for node in poolDom.getElementsByTagName('Server_Pool'): id = node.getElementsByTagName('Unique_Id')[0].firstChild.nodeValue alias = node.getElementsByTagName('Pool_Alias')[0].firstChild.nodeValue - mvip = node.getElementsByTagName('Master_Virtual_Ip')[0].firstChild.nodeValue + mvip = node.getElementsByTagName('Primary_Virtual_Ip')[0].firstChild.nodeValue print "pool: %s, %s, %s" % (id, mvip, alias) members = node.getElementsByTagName('Member') for member in members: @@ -134,10 +134,10 @@ def get_ip_address(ifname): # if (pooled == False): try: if (poolCount == 0): - print "master" + print "primary" # check if a pool exists already if not create # pool if so add us to the pool - print server.configure_virtual_ip(master, ip) + print server.configure_virtual_ip(primary, ip) print server.create_pool_filesystem( fstype, fsmntpoint, @@ -157,7 +157,7 @@ def get_ip_address(ifname): ) else: try: - print "slave" + print "secondary" print server.join_server_pool(poolalias, primuuid, poolmvip, @@ -174,7 +174,7 @@ def get_ip_address(ifname): # con = getCon(auth, node, port) # print con.set_pool_member_ip_list(nodes); print mserver.dispatch("http://%s@%s:%s/api/3" % (auth, node, port), "set_pool_member_ip_list", nodes) - # print server.configure_virtual_ip(master, ip) + # print server.configure_virtual_ip(primary, ip) except Error, e: print "something went wrong: %s" % (e) diff --git a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java index 6eaf09cad856..365dfb097d27 100644 --- a/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/main/java/com/cloud/agent/manager/MockVmManagerImpl.java @@ -261,9 +261,9 @@ public 
CheckRouterAnswer checkRouter(final CheckRouterCommand cmd) { final MockVm vm = _mockVmDao.findByVmName(router_name); final String args = vm.getBootargs(); if (args.indexOf("router_pr=100") > 0) { - s_logger.debug("Router priority is for MASTER"); - final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: MASTER", true); - ans.setState(VirtualRouter.RedundantState.MASTER); + s_logger.debug("Router priority is for PRIMARY"); + final CheckRouterAnswer ans = new CheckRouterAnswer(cmd, "Status: PRIMARY", true); + ans.setState(VirtualRouter.RedundantState.PRIMARY); return ans; } else { s_logger.debug("Router priority is for BACKUP"); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java index aef304a03d36..0e1f91c9c8a8 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesCluster.java @@ -119,7 +119,7 @@ enum State { long getNetworkId(); long getDomainId(); long getAccountId(); - long getMasterNodeCount(); + long getControlNodeCount(); long getNodeCount(); long getTotalNodeCount(); String getKeyPair(); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java index f091d0716905..a384a07d7b34 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterManagerImpl.java @@ -583,7 +583,8 @@ public KubernetesClusterResponse createKubernetesClusterResponse(long kubernetes 
DataCenterVO zone = ApiDBUtils.findZoneById(kubernetesCluster.getZoneId()); response.setZoneId(zone.getUuid()); response.setZoneName(zone.getName()); - response.setMasterNodes(kubernetesCluster.getMasterNodeCount()); + response.setMasterNodes(kubernetesCluster.getControlNodeCount()); + response.setControlNodes(kubernetesCluster.getControlNodeCount()); response.setClusterSize(kubernetesCluster.getNodeCount()); VMTemplateVO template = ApiDBUtils.findTemplateById(kubernetesCluster.getTemplateId()); response.setTemplateId(template.getUuid()); @@ -651,7 +652,7 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); final Long networkId = cmd.getNetworkId(); final String sshKeyPair = cmd.getSSHKeyPairName(); - final Long masterNodeCount = cmd.getMasterNodes(); + final Long controlNodeCount = cmd.getControlNodes(); final Long clusterSize = cmd.getClusterSize(); final String dockerRegistryUserName = cmd.getDockerRegistryUserName(); final String dockerRegistryPassword = cmd.getDockerRegistryPassword(); @@ -664,8 +665,8 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu throw new InvalidParameterValueException("Invalid name for the Kubernetes cluster name:" + name); } - if (masterNodeCount < 1 || masterNodeCount > 100) { - throw new InvalidParameterValueException("Invalid cluster master nodes count: " + masterNodeCount); + if (controlNodeCount < 1 || controlNodeCount > 100) { + throw new InvalidParameterValueException("Invalid cluster control nodes count: " + controlNodeCount); } if (clusterSize < 1 || clusterSize > 100) { @@ -695,7 +696,7 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu if (clusterKubernetesVersion.getZoneId() != null && !clusterKubernetesVersion.getZoneId().equals(zone.getId())) { throw new InvalidParameterValueException(String.format("Kubernetes version ID: %s is not available 
for zone ID: %s", clusterKubernetesVersion.getUuid(), zone.getUuid())); } - if (masterNodeCount > 1 ) { + if (controlNodeCount > 1 ) { try { if (KubernetesVersionManagerImpl.compareSemanticVersions(clusterKubernetesVersion.getSemanticVersion(), MIN_KUBERNETES_VERSION_HA_SUPPORT) < 0) { throw new InvalidParameterValueException(String.format("HA support is available only for Kubernetes version %s and above. Given version ID: %s is %s", MIN_KUBERNETES_VERSION_HA_SUPPORT, clusterKubernetesVersion.getUuid(), clusterKubernetesVersion.getSemanticVersion())); @@ -765,14 +766,14 @@ private void validateKubernetesClusterCreateParameters(final CreateKubernetesClu } } - private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone, final Account owner, final int masterNodesCount, + private Network getKubernetesClusterNetworkIfMissing(final String clusterName, final DataCenter zone, final Account owner, final int controlNodesCount, final int nodesCount, final String externalLoadBalancerIpAddress, final Long networkId) throws CloudRuntimeException { Network network = null; if (networkId != null) { network = networkDao.findById(networkId); if (Network.GuestType.Isolated.equals(network.getGuestType())) { if (kubernetesClusterDao.listByNetworkId(network.getId()).isEmpty()) { - if (!validateNetwork(network, masterNodesCount + nodesCount)) { + if (!validateNetwork(network, controlNodesCount + nodesCount)) { throw new InvalidParameterValueException(String.format("Network ID: %s is not suitable for Kubernetes cluster", network.getUuid())); } networkModel.checkNetworkPermissions(owner, network); @@ -780,8 +781,8 @@ private Network getKubernetesClusterNetworkIfMissing(final String clusterName, f throw new InvalidParameterValueException(String.format("Network ID: %s is already under use by another Kubernetes cluster", network.getUuid())); } } else if (Network.GuestType.Shared.equals(network.getGuestType())) { - if (masterNodesCount > 1 && 
Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) { - throw new InvalidParameterValueException(String.format("Multi-master, HA Kubernetes cluster with %s network ID: %s needs an external load balancer IP address. %s parameter can be used", + if (controlNodesCount > 1 && Strings.isNullOrEmpty(externalLoadBalancerIpAddress)) { + throw new InvalidParameterValueException(String.format("Multi-control nodes, HA Kubernetes cluster with %s network ID: %s needs an external load balancer IP address. %s parameter can be used", network.getGuestType().toString(), network.getUuid(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS)); } } @@ -1005,9 +1006,9 @@ public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) validateKubernetesClusterCreateParameters(cmd); final DataCenter zone = dataCenterDao.findById(cmd.getZoneId()); - final long masterNodeCount = cmd.getMasterNodes(); + final long controlNodeCount = cmd.getControlNodes(); final long clusterSize = cmd.getClusterSize(); - final long totalNodeCount = masterNodeCount + clusterSize; + final long totalNodeCount = controlNodeCount + clusterSize; final ServiceOffering serviceOffering = serviceOfferingDao.findById(cmd.getServiceOfferingId()); final Account owner = accountService.getActiveAccountById(cmd.getEntityOwnerId()); final KubernetesSupportedVersion clusterKubernetesVersion = kubernetesSupportedVersionDao.findById(cmd.getKubernetesVersionId()); @@ -1022,17 +1023,17 @@ public KubernetesCluster createKubernetesCluster(CreateKubernetesClusterCmd cmd) logAndThrow(Level.ERROR, String.format("Creating Kubernetes cluster failed due to error while finding suitable deployment plan for cluster in zone : %s", zone.getName())); } - final Network defaultNetwork = getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)masterNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); + final Network defaultNetwork = 
getKubernetesClusterNetworkIfMissing(cmd.getName(), zone, owner, (int)controlNodeCount, (int)clusterSize, cmd.getExternalLoadBalancerIpAddress(), cmd.getNetworkId()); final VMTemplateVO finalTemplate = getKubernetesServiceTemplate(deployDestination.getCluster().getHypervisorType()); - final long cores = serviceOffering.getCpu() * (masterNodeCount + clusterSize); - final long memory = serviceOffering.getRamSize() * (masterNodeCount + clusterSize); + final long cores = serviceOffering.getCpu() * (controlNodeCount + clusterSize); + final long memory = serviceOffering.getRamSize() * (controlNodeCount + clusterSize); final KubernetesClusterVO cluster = Transaction.execute(new TransactionCallback() { @Override public KubernetesClusterVO doInTransaction(TransactionStatus status) { KubernetesClusterVO newCluster = new KubernetesClusterVO(cmd.getName(), cmd.getDisplayName(), zone.getId(), clusterKubernetesVersion.getId(), serviceOffering.getId(), finalTemplate.getId(), defaultNetwork.getId(), owner.getDomainId(), - owner.getAccountId(), masterNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, cmd.getNodeRootDiskSize(), ""); + owner.getAccountId(), controlNodeCount, clusterSize, KubernetesCluster.State.Created, cmd.getSSHKeyPairName(), cores, memory, cmd.getNodeRootDiskSize(), ""); kubernetesClusterDao.persist(newCluster); return newCluster; } @@ -1318,7 +1319,7 @@ public void reallyRun() { /* Kubernetes cluster scanner checks if the Kubernetes cluster is in desired state. If it detects Kubernetes cluster is not in desired state, it will trigger an event and marks the Kubernetes cluster to be 'Alert' state. For e.g a Kubernetes cluster in 'Running' state should mean all the cluster of node VM's in the custer should be running and - number of the node VM's should be of cluster size, and the master node VM's is running. It is possible due to + number of the node VM's should be of cluster size, and the control node VM's is running. 
It is possible due to out of band changes by user or hosts going down, we may end up one or more VM's in stopped state. in which case scanner detects these changes and marks the cluster in 'Alert' state. Similarly cluster in 'Stopped' state means all the cluster VM's are in stopped state any mismatch in states should get picked up by Kubernetes cluster and @@ -1442,7 +1443,7 @@ public void reallyRun() { boolean isClusterVMsInDesiredState(KubernetesCluster kubernetesCluster, VirtualMachine.State state) { List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); - // check cluster is running at desired capacity include master nodes as well + // check cluster is running at desired capacity include control nodes as well if (clusterVMs.size() < kubernetesCluster.getTotalNodeCount()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug(String.format("Found only %d VMs in the Kubernetes cluster ID: %s while expected %d VMs to be in state: %s", diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java index 9ff0be335f37..b6a37d9607c8 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/KubernetesClusterVO.java @@ -69,8 +69,8 @@ public class KubernetesClusterVO implements KubernetesCluster { @Column(name = "account_id") private long accountId; - @Column(name = "master_node_count") - private long masterNodeCount; + @Column(name = "control_node_count") + private long controlNodeCount; @Column(name = "node_count") private long nodeCount; @@ -202,12 +202,12 @@ public void setAccountId(long accountId) { } @Override - public long getMasterNodeCount() { - return masterNodeCount; + public long getControlNodeCount() { + return 
controlNodeCount; } - public void setMasterNodeCount(long masterNodeCount) { - this.masterNodeCount = masterNodeCount; + public void setControlNodeCount(long controlNodeCount) { + this.controlNodeCount = controlNodeCount; } @Override @@ -221,7 +221,7 @@ public void setNodeCount(long nodeCount) { @Override public long getTotalNodeCount() { - return this.masterNodeCount + this.nodeCount; + return this.controlNodeCount + this.nodeCount; } @Override @@ -308,7 +308,7 @@ public KubernetesClusterVO() { } public KubernetesClusterVO(String name, String description, long zoneId, long kubernetesVersionId, long serviceOfferingId, long templateId, - long networkId, long domainId, long accountId, long masterNodeCount, long nodeCount, State state, + long networkId, long domainId, long accountId, long controlNodeCount, long nodeCount, State state, String keyPair, long cores, long memory, Long nodeRootDiskSize, String endpoint) { this.uuid = UUID.randomUUID().toString(); this.name = name; @@ -320,7 +320,7 @@ public KubernetesClusterVO(String name, String description, long zoneId, long ku this.networkId = networkId; this.domainId = domainId; this.accountId = accountId; - this.masterNodeCount = masterNodeCount; + this.controlNodeCount = controlNodeCount; this.nodeCount = nodeCount; this.state = state; this.keyPair = keyPair; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 1ca58a0dfa68..5f663dff7849 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -231,9 +231,9 @@ public KubernetesClusterVmMapVO 
doInTransaction(TransactionStatus status) { }); } - private UserVm fetchMasterVmIfMissing(final UserVm masterVm) { - if (masterVm != null) { - return masterVm; + private UserVm fetchControlVmIfMissing(final UserVm controlVm) { + if (controlVm != null) { + return controlVm; } List clusterVMs = kubernetesClusterVmMapDao.listByClusterId(kubernetesCluster.getId()); if (CollectionUtils.isEmpty(clusterVMs)) { @@ -248,16 +248,16 @@ private UserVm fetchMasterVmIfMissing(final UserVm masterVm) { return userVmDao.findById(vmIds.get(0)); } - protected String getMasterVmPrivateIp() { + protected String getControlVmPrivateIp() { String ip = null; - UserVm vm = fetchMasterVmIfMissing(null); + UserVm vm = fetchControlVmIfMissing(null); if (vm != null) { ip = vm.getPrivateIpAddress(); } return ip; } - protected Pair getKubernetesClusterServerIpSshPort(UserVm masterVm) { + protected Pair getKubernetesClusterServerIpSshPort(UserVm controlVm) { int port = CLUSTER_NODES_DEFAULT_START_SSH_PORT; KubernetesClusterDetailsVO detail = kubernetesClusterDetailsDao.findDetail(kubernetesCluster.getId(), ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS); if (detail != null && !Strings.isNullOrEmpty(detail.getValue())) { @@ -283,12 +283,12 @@ protected Pair getKubernetesClusterServerIpSshPort(UserVm maste return new Pair<>(null, port); } else if (Network.GuestType.Shared.equals(network.getGuestType())) { port = 22; - masterVm = fetchMasterVmIfMissing(masterVm); - if (masterVm == null) { - LOGGER.warn(String.format("Unable to retrieve master VM for Kubernetes cluster : %s", kubernetesCluster.getName())); + controlVm = fetchControlVmIfMissing(controlVm); + if (controlVm == null) { + LOGGER.warn(String.format("Unable to retrieve control VM for Kubernetes cluster : %s", kubernetesCluster.getName())); return new Pair<>(null, port); } - return new Pair<>(masterVm.getPrivateIpAddress(), port); + return new Pair<>(controlVm.getPrivateIpAddress(), port); } LOGGER.warn(String.format("Unable to retrieve 
server IP address for Kubernetes cluster : %s", kubernetesCluster.getName())); return new Pair<>(null, port); diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java index 1fce00ba81d5..3e32d8ebf4ce 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterScaleWorker.java @@ -124,7 +124,7 @@ private void scaleKubernetesClusterNetworkRules(final List clusterVMIds, f throw new ManagementServerException("Firewall rule for node SSH access can't be provisioned"); } int existingFirewallRuleSourcePortEnd = firewallRule.getSourcePortEnd(); - final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getMasterNodeCount()); + final int scaledTotalNodeCount = clusterSize == null ? (int)kubernetesCluster.getTotalNodeCount() : (int)(clusterSize + kubernetesCluster.getControlNodeCount()); // Provision new SSH firewall rules try { provisionFirewallRules(publicIp, owner, CLUSTER_NODES_DEFAULT_START_SSH_PORT, CLUSTER_NODES_DEFAULT_START_SSH_PORT + scaledTotalNodeCount - 1); @@ -170,7 +170,7 @@ private KubernetesClusterVO updateKubernetesClusterEntry(final Long newSize, fin final ServiceOffering serviceOffering = newServiceOffering == null ? serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()) : newServiceOffering; final Long serviceOfferingId = newServiceOffering == null ? null : serviceOffering.getId(); - final long size = newSize == null ? 
kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getMasterNodeCount()); + final long size = newSize == null ? kubernetesCluster.getTotalNodeCount() : (newSize + kubernetesCluster.getControlNodeCount()); final long cores = serviceOffering.getCpu() * size; final long memory = serviceOffering.getRamSize() * size; KubernetesClusterVO kubernetesClusterVO = updateKubernetesClusterEntry(cores, memory, newSize, serviceOfferingId); @@ -309,7 +309,7 @@ private void scaleDownKubernetesClusterSize() throws CloudRuntimeException { final List originalVmList = getKubernetesClusterVMMaps(); int i = originalVmList.size() - 1; List removedVmIds = new ArrayList<>(); - while (i >= kubernetesCluster.getMasterNodeCount() + clusterSize) { + while (i >= kubernetesCluster.getControlNodeCount() + clusterSize) { KubernetesClusterVmMapVO vmMapVO = originalVmList.get(i); UserVmVO userVM = userVmDao.findById(vmMapVO.getVmId()); if (!removeKubernetesClusterNode(publicIpAddress, sshPort, userVM, 3, 30000)) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java index 4a96b9ed2bab..54c3a6228d13 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterStartWorker.java @@ -89,8 +89,8 @@ public KubernetesSupportedVersion getKubernetesClusterVersion() { return kubernetesClusterVersion; } - private Pair> getKubernetesMasterIpAddresses(final DataCenter zone, final Network network, final Account account) throws InsufficientAddressCapacityException { - String masterIp = null; + private Pair> getKubernetesControlIpAddresses(final DataCenter zone, final Network 
network, final Account account) throws InsufficientAddressCapacityException { + String controlIp = null; Map requestedIps = null; if (Network.GuestType.Shared.equals(network.getGuestType())) { List vlanIds = new ArrayList<>(); @@ -100,16 +100,16 @@ private Pair> getKubernetesMasterIpAddres } PublicIp ip = ipAddressManager.getAvailablePublicIpAddressFromVlans(zone.getId(), null, account, Vlan.VlanType.DirectAttached, vlanIds,network.getId(), null, false); if (ip != null) { - masterIp = ip.getAddress().toString(); + controlIp = ip.getAddress().toString(); } requestedIps = new HashMap<>(); Ip ipAddress = ip.getAddress(); boolean isIp6 = ipAddress.isIp6(); requestedIps.put(network.getId(), new Network.IpAddresses(ipAddress.isIp4() ? ip.getAddress().addr() : null, null)); } else { - masterIp = ipAddressManager.acquireGuestIpAddress(networkDao.findById(kubernetesCluster.getNetworkId()), null); + controlIp = ipAddressManager.acquireGuestIpAddress(networkDao.findById(kubernetesCluster.getNetworkId()), null); } - return new Pair<>(masterIp, requestedIps); + return new Pair<>(controlIp, requestedIps); } private boolean isKubernetesVersionSupportsHA() { @@ -127,10 +127,10 @@ private boolean isKubernetesVersionSupportsHA() { return haSupported; } - private String getKubernetesMasterConfig(final String masterIp, final String serverIp, - final String hostName, final boolean haSupported, - final boolean ejectIso) throws IOException { - String k8sMasterConfig = readResourceFile("/conf/k8s-master.yml"); + private String getKubernetesControlConfig(final String controlIp, final String serverIp, + final String hostName, final boolean haSupported, + final boolean ejectIso) throws IOException { + String k8sControlConfig = readResourceFile("/conf/k8s-control-node.yml"); final String apiServerCert = "{{ k8s_master.apiserver.crt }}"; final String apiServerKey = "{{ k8s_master.apiserver.key }}"; final String caCert = "{{ k8s_master.ca.crt }}"; @@ -139,8 +139,8 @@ private String 
getKubernetesMasterConfig(final String masterIp, final String ser final String clusterInitArgsKey = "{{ k8s_master.cluster.initargs }}"; final String ejectIsoKey = "{{ k8s.eject.iso }}"; final List addresses = new ArrayList<>(); - addresses.add(masterIp); - if (!serverIp.equals(masterIp)) { + addresses.add(controlIp); + if (!serverIp.equals(controlIp)) { addresses.add(serverIp); } final Certificate certificate = caManager.issueCertificate(null, Arrays.asList(hostName, "kubernetes", @@ -149,9 +149,9 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser final String tlsClientCert = CertUtils.x509CertificateToPem(certificate.getClientCertificate()); final String tlsPrivateKey = CertUtils.privateKeyToPem(certificate.getPrivateKey()); final String tlsCaCert = CertUtils.x509CertificatesToPem(certificate.getCaCertificates()); - k8sMasterConfig = k8sMasterConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n ")); - k8sMasterConfig = k8sMasterConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n ")); - k8sMasterConfig = k8sMasterConfig.replace(caCert, tlsCaCert.replace("\n", "\n ")); + k8sControlConfig = k8sControlConfig.replace(apiServerCert, tlsClientCert.replace("\n", "\n ")); + k8sControlConfig = k8sControlConfig.replace(apiServerKey, tlsPrivateKey.replace("\n", "\n ")); + k8sControlConfig = k8sControlConfig.replace(caCert, tlsCaCert.replace("\n", "\n ")); String pubKey = "- \"" + configurationDao.getValue("ssh.publickey") + "\""; String sshKeyPair = kubernetesCluster.getKeyPair(); if (!Strings.isNullOrEmpty(sshKeyPair)) { @@ -160,8 +160,8 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } - k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); - k8sMasterConfig = k8sMasterConfig.replace(clusterToken, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sControlConfig = k8sControlConfig.replace(sshPubKey, pubKey); + 
k8sControlConfig = k8sControlConfig.replace(clusterToken, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); String initArgs = ""; if (haSupported) { initArgs = String.format("--control-plane-endpoint %s:%d --upload-certs --certificate-key %s ", @@ -171,55 +171,55 @@ private String getKubernetesMasterConfig(final String masterIp, final String ser } initArgs += String.format("--apiserver-cert-extra-sans=%s", serverIp); initArgs += String.format(" --kubernetes-version=%s", getKubernetesClusterVersion().getSemanticVersion()); - k8sMasterConfig = k8sMasterConfig.replace(clusterInitArgsKey, initArgs); - k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); - return k8sMasterConfig; + k8sControlConfig = k8sControlConfig.replace(clusterInitArgsKey, initArgs); + k8sControlConfig = k8sControlConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + return k8sControlConfig; } - private UserVm createKubernetesMaster(final Network network, String serverIp) throws ManagementServerException, + private UserVm createKubernetesControlNode(final Network network, String serverIp) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { - UserVm masterVm = null; + UserVm controlVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); List networkIds = new ArrayList(); networkIds.add(kubernetesCluster.getNetworkId()); - Pair> ipAddresses = getKubernetesMasterIpAddresses(zone, network, owner); - String masterIp = ipAddresses.first(); + Pair> ipAddresses = getKubernetesControlIpAddresses(zone, network, owner); + String controlIp = ipAddresses.first(); Map requestedIps = ipAddresses.second(); if (Network.GuestType.Shared.equals(network.getGuestType()) && Strings.isNullOrEmpty(serverIp)) { - serverIp = masterIp; + serverIp = controlIp; } - Network.IpAddresses addrs = new 
Network.IpAddresses(masterIp, null); + Network.IpAddresses addrs = new Network.IpAddresses(controlIp, null); long rootDiskSize = kubernetesCluster.getNodeRootDiskSize(); Map customParameterMap = new HashMap(); if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = kubernetesClusterNodeNamePrefix + "-master"; - if (kubernetesCluster.getMasterNodeCount() > 1) { + String hostName = kubernetesClusterNodeNamePrefix + "-control"; + if (kubernetesCluster.getControlNodeCount() > 1) { hostName += "-1"; } hostName = getKubernetesClusterNodeAvailableName(hostName); boolean haSupported = isKubernetesVersionSupportsHA(); - String k8sMasterConfig = null; + String k8sControlConfig = null; try { - k8sMasterConfig = getKubernetesMasterConfig(masterIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); + k8sControlConfig = getKubernetesControlConfig(controlIp, serverIp, hostName, haSupported, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); } catch (IOException e) { - logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e); + logAndThrow(Level.ERROR, "Failed to read Kubernetes control configuration file", e); } - String base64UserData = Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset())); - masterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, + String base64UserData = Base64.encodeBase64String(k8sControlConfig.getBytes(StringUtils.getPreferredCharset())); + controlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), requestedIps, addrs, null, null, null, customParameterMap, null, null, null, null); if 
(LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Created master VM ID: %s, %s in the Kubernetes cluster : %s", masterVm.getUuid(), hostName, kubernetesCluster.getName())); + LOGGER.info(String.format("Created control VM ID: %s, %s in the Kubernetes cluster : %s", controlVm.getUuid(), hostName, kubernetesCluster.getName())); } - return masterVm; + return controlVm; } - private String getKubernetesAdditionalMasterConfig(final String joinIp, final boolean ejectIso) throws IOException { - String k8sMasterConfig = readResourceFile("/conf/k8s-master-add.yml"); + private String getKubernetesAdditionalControlConfig(final String joinIp, final boolean ejectIso) throws IOException { + String k8sControlConfig = readResourceFile("/conf/k8s-control-node-add.yml"); final String joinIpKey = "{{ k8s_master.join_ip }}"; final String clusterTokenKey = "{{ k8s_master.cluster.token }}"; final String sshPubKey = "{{ k8s.ssh.pub.key }}"; @@ -233,17 +233,17 @@ private String getKubernetesAdditionalMasterConfig(final String joinIp, final bo pubKey += "\n - \"" + sshkp.getPublicKey() + "\""; } } - k8sMasterConfig = k8sMasterConfig.replace(sshPubKey, pubKey); - k8sMasterConfig = k8sMasterConfig.replace(joinIpKey, joinIp); - k8sMasterConfig = k8sMasterConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); - k8sMasterConfig = k8sMasterConfig.replace(clusterHACertificateKey, KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); - k8sMasterConfig = k8sMasterConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); - return k8sMasterConfig; + k8sControlConfig = k8sControlConfig.replace(sshPubKey, pubKey); + k8sControlConfig = k8sControlConfig.replace(joinIpKey, joinIp); + k8sControlConfig = k8sControlConfig.replace(clusterTokenKey, KubernetesClusterUtil.generateClusterToken(kubernetesCluster)); + k8sControlConfig = k8sControlConfig.replace(clusterHACertificateKey, 
KubernetesClusterUtil.generateClusterHACertificateKey(kubernetesCluster)); + k8sControlConfig = k8sControlConfig.replace(ejectIsoKey, String.valueOf(ejectIso)); + return k8sControlConfig; } - private UserVm createKubernetesAdditionalMaster(final String joinIp, final int additionalMasterNodeInstance) throws ManagementServerException, + private UserVm createKubernetesAdditionalControlNode(final String joinIp, final int additionalControlNodeInstance) throws ManagementServerException, ResourceUnavailableException, InsufficientCapacityException { - UserVm additionalMasterVm = null; + UserVm additionalControlVm = null; DataCenter zone = dataCenterDao.findById(kubernetesCluster.getZoneId()); ServiceOffering serviceOffering = serviceOfferingDao.findById(kubernetesCluster.getServiceOfferingId()); List networkIds = new ArrayList(); @@ -254,50 +254,50 @@ private UserVm createKubernetesAdditionalMaster(final String joinIp, final int a if (rootDiskSize > 0) { customParameterMap.put("rootdisksize", String.valueOf(rootDiskSize)); } - String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-master-%d", kubernetesClusterNodeNamePrefix, additionalMasterNodeInstance + 1)); - String k8sMasterConfig = null; + String hostName = getKubernetesClusterNodeAvailableName(String.format("%s-control-%d", kubernetesClusterNodeNamePrefix, additionalControlNodeInstance + 1)); + String k8sControlConfig = null; try { - k8sMasterConfig = getKubernetesAdditionalMasterConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); + k8sControlConfig = getKubernetesAdditionalControlConfig(joinIp, Hypervisor.HypervisorType.VMware.equals(clusterTemplate.getHypervisorType())); } catch (IOException e) { - logAndThrow(Level.ERROR, "Failed to read Kubernetes master configuration file", e); + logAndThrow(Level.ERROR, "Failed to read Kubernetes control configuration file", e); } - String base64UserData = 
Base64.encodeBase64String(k8sMasterConfig.getBytes(StringUtils.getPreferredCharset())); - additionalMasterVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, + String base64UserData = Base64.encodeBase64String(k8sControlConfig.getBytes(StringUtils.getPreferredCharset())); + additionalControlVm = userVmService.createAdvancedVirtualMachine(zone, serviceOffering, clusterTemplate, networkIds, owner, hostName, hostName, null, null, null, Hypervisor.HypervisorType.None, BaseCmd.HTTPMethod.POST, base64UserData, kubernetesCluster.getKeyPair(), null, addrs, null, null, null, customParameterMap, null, null, null, null); if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Created master VM ID : %s, %s in the Kubernetes cluster : %s", additionalMasterVm.getUuid(), hostName, kubernetesCluster.getName())); + LOGGER.info(String.format("Created control VM ID : %s, %s in the Kubernetes cluster : %s", additionalControlVm.getUuid(), hostName, kubernetesCluster.getName())); } - return additionalMasterVm; + return additionalControlVm; } - private UserVm provisionKubernetesClusterMasterVm(final Network network, final String publicIpAddress) throws + private UserVm provisionKubernetesClusterControlVm(final Network network, final String publicIpAddress) throws ManagementServerException, InsufficientCapacityException, ResourceUnavailableException { - UserVm k8sMasterVM = null; - k8sMasterVM = createKubernetesMaster(network, publicIpAddress); - addKubernetesClusterVm(kubernetesCluster.getId(), k8sMasterVM.getId()); + UserVm k8sControlVM = null; + k8sControlVM = createKubernetesControlNode(network, publicIpAddress); + addKubernetesClusterVm(kubernetesCluster.getId(), k8sControlVM.getId()); if (kubernetesCluster.getNodeRootDiskSize() > 0) { - resizeNodeVolume(k8sMasterVM); + resizeNodeVolume(k8sControlVM); } - startKubernetesVM(k8sMasterVM); - k8sMasterVM = userVmDao.findById(k8sMasterVM.getId()); - if (k8sMasterVM == null) { - 
throw new ManagementServerException(String.format("Failed to provision master VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); + startKubernetesVM(k8sControlVM); + k8sControlVM = userVmDao.findById(k8sControlVM.getId()); + if (k8sControlVM == null) { + throw new ManagementServerException(String.format("Failed to provision control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned the master VM : %s in to the Kubernetes cluster : %s", k8sMasterVM.getDisplayName(), kubernetesCluster.getName())); + LOGGER.info(String.format("Provisioned the control VM : %s in to the Kubernetes cluster : %s", k8sControlVM.getDisplayName(), kubernetesCluster.getName())); } - return k8sMasterVM; + return k8sControlVM; } - private List provisionKubernetesClusterAdditionalMasterVms(final String publicIpAddress) throws + private List provisionKubernetesClusterAdditionalControlVms(final String publicIpAddress) throws InsufficientCapacityException, ManagementServerException, ResourceUnavailableException { - List additionalMasters = new ArrayList<>(); - if (kubernetesCluster.getMasterNodeCount() > 1) { - for (int i = 1; i < kubernetesCluster.getMasterNodeCount(); i++) { + List additionalControlVms = new ArrayList<>(); + if (kubernetesCluster.getControlNodeCount() > 1) { + for (int i = 1; i < kubernetesCluster.getControlNodeCount(); i++) { UserVm vm = null; - vm = createKubernetesAdditionalMaster(publicIpAddress, i); + vm = createKubernetesAdditionalControlNode(publicIpAddress, i); addKubernetesClusterVm(kubernetesCluster.getId(), vm.getId()); if (kubernetesCluster.getNodeRootDiskSize() > 0) { resizeNodeVolume(vm); @@ -305,15 +305,15 @@ private List provisionKubernetesClusterAdditionalMasterVms(final String startKubernetesVM(vm); vm = userVmDao.findById(vm.getId()); if (vm == null) { - throw new ManagementServerException(String.format("Failed to provision additional master VM for Kubernetes 
cluster : %s" , kubernetesCluster.getName())); + throw new ManagementServerException(String.format("Failed to provision additional control VM for Kubernetes cluster : %s" , kubernetesCluster.getName())); } - additionalMasters.add(vm); + additionalControlVms.add(vm); if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Provisioned additional master VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); + LOGGER.info(String.format("Provisioned additional control VM : %s in to the Kubernetes cluster : %s", vm.getDisplayName(), kubernetesCluster.getName())); } } } - return additionalMasters; + return additionalControlVms; } private Network startKubernetesClusterNetwork(final DeployDestination destination) throws ManagementServerException { @@ -348,10 +348,10 @@ private void provisionLoadBalancerRule(final IpAddress publicIp, final Network n account.getId(), false, NetUtils.TCP_PROTO, true); Map> vmIdIpMap = new HashMap<>(); - for (int i = 0; i < kubernetesCluster.getMasterNodeCount(); ++i) { + for (int i = 0; i < kubernetesCluster.getControlNodeCount(); ++i) { List ips = new ArrayList<>(); - Nic masterVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId()); - ips.add(masterVmNic.getIPv4Address()); + Nic controlVmNic = networkModel.getNicInNetwork(clusterVMIds.get(i), kubernetesCluster.getNetworkId()); + ips.add(controlVmNic.getIPv4Address()); vmIdIpMap.put(clusterVMIds.get(i), ips); } lbService.assignToLoadBalancer(lb.getId(), null, vmIdIpMap); @@ -361,7 +361,7 @@ private void provisionLoadBalancerRule(final IpAddress publicIp, final Network n * Setup network rules for Kubernetes cluster * Open up firewall port CLUSTER_API_PORT, secure port on which Kubernetes * API server is running. Also create load balancing rule to forward public - * IP traffic to master VMs' private IP. + * IP traffic to control VMs' private IP. 
* Open up firewall ports NODES_DEFAULT_START_SSH_PORT to NODES_DEFAULT_START_SSH_PORT+n * for SSH access. Also create port-forwarding rule to forward public IP traffic to all * @param network @@ -405,7 +405,7 @@ private void setupKubernetesClusterNetworkRules(Network network, List cl throw new ManagementServerException(String.format("Failed to provision firewall rules for SSH access for the Kubernetes cluster : %s", kubernetesCluster.getName()), e); } - // Load balancer rule fo API access for master node VMs + // Load balancer rule for API access for control node VMs try { provisionLoadBalancerRule(publicIp, network, owner, clusterVMIds, CLUSTER_API_PORT); } catch (NetworkRuleConflictException | InsufficientAddressCapacityException e) { @@ -450,9 +450,9 @@ private boolean isKubernetesClusterKubeConfigAvailable(final long timeoutTime) { } String kubeConfig = KubernetesClusterUtil.getKubernetesClusterConfig(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, sshKeyFile, timeoutTime); if (!Strings.isNullOrEmpty(kubeConfig)) { - final String masterVMPrivateIpAddress = getMasterVmPrivateIp(); - if (!Strings.isNullOrEmpty(masterVMPrivateIpAddress)) { - kubeConfig = kubeConfig.replace(String.format("server: https://%s:%d", masterVMPrivateIpAddress, CLUSTER_API_PORT), + final String controlVMPrivateIpAddress = getControlVmPrivateIp(); + if (!Strings.isNullOrEmpty(controlVMPrivateIpAddress)) { + kubeConfig = kubeConfig.replace(String.format("server: https://%s:%d", controlVMPrivateIpAddress, CLUSTER_API_PORT), String.format("server: https://%s:%d", publicIpAddress, CLUSTER_API_PORT)); } kubernetesClusterDetailsDao.addDetail(kubernetesCluster.getId(), "kubeConfigData", Base64.encodeBase64String(kubeConfig.getBytes(StringUtils.getPreferredCharset())), false); @@ -503,29 +503,29 @@ public boolean startKubernetesClusterOnCreate() { Pair publicIpSshPort = getKubernetesClusterServerIpSshPort(null); publicIpAddress = publicIpSshPort.first(); if 
(Strings.isNullOrEmpty(publicIpAddress) && - (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getMasterNodeCount() > 1)) { // Shared network, single-master cluster won't have an IP yet + (Network.GuestType.Isolated.equals(network.getGuestType()) || kubernetesCluster.getControlNodeCount() > 1)) { // Shared network, single-control node cluster won't have an IP yet logTransitStateAndThrow(Level.ERROR, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster" , kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed); } List clusterVMs = new ArrayList<>(); - UserVm k8sMasterVM = null; + UserVm k8sControlVM = null; try { - k8sMasterVM = provisionKubernetesClusterMasterVm(network, publicIpAddress); + k8sControlVM = provisionKubernetesClusterControlVm(network, publicIpAddress); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the master VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning the control VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } - clusterVMs.add(k8sMasterVM); + clusterVMs.add(k8sControlVM); if (Strings.isNullOrEmpty(publicIpAddress)) { - publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sMasterVM); + publicIpSshPort = getKubernetesClusterServerIpSshPort(k8sControlVM); publicIpAddress = publicIpSshPort.first(); if (Strings.isNullOrEmpty(publicIpAddress)) { logTransitStateAndThrow(Level.WARN, String.format("Failed to start Kubernetes cluster : %s as no public IP found for the cluster", kubernetesCluster.getName()), kubernetesCluster.getId(), 
KubernetesCluster.Event.CreateFailed); } } try { - List additionalMasterVMs = provisionKubernetesClusterAdditionalMasterVms(publicIpAddress); - clusterVMs.addAll(additionalMasterVMs); + List additionalControlVMs = provisionKubernetesClusterAdditionalControlVms(publicIpAddress); + clusterVMs.addAll(additionalControlVMs); } catch (CloudRuntimeException | ManagementServerException | ResourceUnavailableException | InsufficientCapacityException e) { - logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional master VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); + logTransitStateAndThrow(Level.ERROR, String.format("Provisioning additional control VM failed in the Kubernetes cluster : %s", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } try { List nodeVMs = provisionKubernetesClusterNodeVms(kubernetesCluster.getNodeCount(), publicIpAddress); @@ -542,9 +542,9 @@ public boolean startKubernetesClusterOnCreate() { logTransitStateAndThrow(Level.ERROR, String.format("Failed to setup Kubernetes cluster : %s, unable to setup network rules", kubernetesCluster.getName()), kubernetesCluster.getId(), KubernetesCluster.Event.CreateFailed, e); } attachIsoKubernetesVMs(clusterVMs); - if (!KubernetesClusterUtil.isKubernetesClusterMasterVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) { - String msg = String.format("Failed to setup Kubernetes cluster : %s in usable state as unable to access master node VMs of the cluster", kubernetesCluster.getName()); - if (kubernetesCluster.getMasterNodeCount() > 1 && Network.GuestType.Shared.equals(network.getGuestType())) { + if (!KubernetesClusterUtil.isKubernetesClusterControlVmRunning(kubernetesCluster, publicIpAddress, publicIpSshPort.second(), startTimeoutTime)) { + String msg = String.format("Failed to setup Kubernetes cluster : %s in usable 
state as unable to access control node VMs of the cluster", kubernetesCluster.getName()); + if (kubernetesCluster.getControlNodeCount() > 1 && Network.GuestType.Shared.equals(network.getGuestType())) { msg = String.format("%s. Make sure external load-balancer has port forwarding rules for SSH access on ports %d-%d and API access on port %d", msg, CLUSTER_NODES_DEFAULT_START_SSH_PORT, diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java index 957adea6f774..86c5c8ed70bc 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterUpgradeWorker.java @@ -123,9 +123,9 @@ private void upgradeKubernetesClusterNodes() { if (!KubernetesClusterUtil.uncordonKubernetesClusterNode(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), vm, upgradeTimeoutTime, 15000)) { logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to uncordon Kubernetes node on VM : %s", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } - if (i == 0) { // Wait for master to get in Ready state + if (i == 0) { // Wait for control node to get in Ready state if (!KubernetesClusterUtil.isKubernetesClusterNodeReady(kubernetesCluster, publicIpAddress, sshPort, CLUSTER_NODE_VM_USER, getManagementServerSshPublicKeyFile(), hostName, upgradeTimeoutTime, 15000)) { - logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get master Kubernetes node 
on VM : %s in ready state", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); + logTransitStateDetachIsoAndThrow(Level.ERROR, String.format("Failed to upgrade Kubernetes cluster : %s, unable to get control Kubernetes node on VM : %s in ready state", kubernetesCluster.getName(), vm.getDisplayName()), kubernetesCluster, clusterVMs, KubernetesCluster.Event.OperationFailed, null); } } if (LOGGER.isInfoEnabled()) { diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index b9bda0840d12..48a39f52e0c1 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -254,25 +254,25 @@ public static boolean isKubernetesClusterServerRunning(final KubernetesCluster k return k8sApiServerSetup; } - public static boolean isKubernetesClusterMasterVmRunning(final KubernetesCluster kubernetesCluster, final String ipAddress, - final int port, final long timeoutTime) { - boolean masterVmRunning = false; - while (!masterVmRunning && System.currentTimeMillis() < timeoutTime) { + public static boolean isKubernetesClusterControlVmRunning(final KubernetesCluster kubernetesCluster, final String ipAddress, + final int port, final long timeoutTime) { + boolean controlVmRunning = false; + while (!controlVmRunning && System.currentTimeMillis() < timeoutTime) { try (Socket socket = new Socket()) { socket.connect(new InetSocketAddress(ipAddress, port), 10000); - masterVmRunning = true; + controlVmRunning = true; } catch (IOException e) { if (LOGGER.isInfoEnabled()) { - LOGGER.info(String.format("Waiting for Kubernetes cluster : %s master 
node VMs to be accessible", kubernetesCluster.getName())); + LOGGER.info(String.format("Waiting for Kubernetes cluster : %s control node VMs to be accessible", kubernetesCluster.getName())); } try { Thread.sleep(10000); } catch (InterruptedException ex) { - LOGGER.warn(String.format("Error while waiting for Kubernetes cluster : %s master node VMs to be accessible", kubernetesCluster.getName()), ex); + LOGGER.warn(String.format("Error while waiting for Kubernetes cluster : %s control node VMs to be accessible", kubernetesCluster.getName()), ex); } } } - return masterVmRunning; + return controlVmRunning; } public static boolean validateKubernetesClusterReadyNodesCount(final KubernetesCluster kubernetesCluster, diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java index 54e307c0c5b9..8921d691142b 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/command/user/kubernetes/cluster/CreateKubernetesClusterCmd.java @@ -109,9 +109,14 @@ public class CreateKubernetesClusterCmd extends BaseAsyncCreateCmd { private String sshKeyPairName; @Parameter(name=ApiConstants.MASTER_NODES, type = CommandType.LONG, - description = "number of Kubernetes cluster master nodes, default is 1") + description = "number of Kubernetes cluster master nodes, default is 1. 
This option is deprecated, please use 'controlnodes' parameter.") + @Deprecated private Long masterNodes; + @Parameter(name=ApiConstants.CONTROL_NODES, type = CommandType.LONG, + description = "number of Kubernetes cluster control nodes, default is 1") + private Long controlNodes; + @Parameter(name=ApiConstants.EXTERNAL_LOAD_BALANCER_IP_ADDRESS, type = CommandType.STRING, description = "external load balancer IP address while using shared network with Kubernetes HA cluster") private String externalLoadBalancerIpAddress; @@ -191,6 +196,13 @@ public Long getMasterNodes() { return masterNodes; } + public Long getControlNodes() { + if (controlNodes == null) { + return 1L; + } + return controlNodes; + } + public String getExternalLoadBalancerIpAddress() { return externalLoadBalancerIpAddress; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java index bb3f14f56891..682aaaca8126 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesClusterResponse.java @@ -101,10 +101,15 @@ public class KubernetesClusterResponse extends BaseResponse implements Controlle @Param(description = "keypair details") private String keypair; + @Deprecated @SerializedName(ApiConstants.MASTER_NODES) @Param(description = "the master nodes count for the Kubernetes cluster") private Long masterNodes; + @SerializedName(ApiConstants.CONTROL_NODES) + @Param(description = "the control nodes count for the Kubernetes cluster") + private Long controlNodes; + @SerializedName(ApiConstants.SIZE) @Param(description = "the size (worker nodes count) of the Kubernetes cluster") private Long clusterSize; @@ -269,6 +274,14 @@ public void 
setMasterNodes(Long masterNodes) { this.masterNodes = masterNodes; } + public Long getControlNodes() { + return controlNodes; + } + + public void setControlNodes(Long controlNodes) { + this.controlNodes = controlNodes; + } + public Long getClusterSize() { return clusterSize; } diff --git a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java index 4deb50d4a0b5..449bd9570551 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java +++ b/plugins/integrations/kubernetes-service/src/main/java/org/apache/cloudstack/api/response/KubernetesSupportedVersionResponse.java @@ -61,7 +61,7 @@ public class KubernetesSupportedVersionResponse extends BaseResponse { private String zoneName; @SerializedName(ApiConstants.SUPPORTS_HA) - @Param(description = "whether Kubernetes supported version supports HA, multi-master") + @Param(description = "whether Kubernetes supported version supports HA, multi-control nodes") private Boolean supportsHA; @SerializedName(ApiConstants.STATE) diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml similarity index 100% rename from plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master-add.yml rename to plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node-add.yml diff --git a/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml b/plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml similarity index 100% rename from plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-master.yml rename to 
plugins/integrations/kubernetes-service/src/main/resources/conf/k8s-control-node.yml diff --git a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh index ea36d7ee8970..d66176028d6c 100644 --- a/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh +++ b/plugins/integrations/kubernetes-service/src/main/resources/script/upgrade-kubernetes.sh @@ -18,14 +18,14 @@ # Version 1.14 and below needs extra flags with kubeadm upgrade node if [ $# -lt 4 ]; then - echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_MASTER IS_OLD_VERSION IS_EJECT_ISO" + echo "Invalid input. Valid usage: ./upgrade-kubernetes.sh UPGRADE_VERSION IS_CONTROL_NODE IS_OLD_VERSION IS_EJECT_ISO" echo "eg: ./upgrade-kubernetes.sh 1.16.3 true false false" exit 1 fi UPGRADE_VERSION="${1}" -IS_MAIN_MASTER="" +IS_MAIN_CONTROL="" if [ $# -gt 1 ]; then - IS_MAIN_MASTER="${2}" + IS_MAIN_CONTROL="${2}" fi IS_OLD_VERSION="" if [ $# -gt 2 ]; then @@ -100,7 +100,7 @@ if [ -d "$BINARIES_DIR" ]; then tar -f "${BINARIES_DIR}/cni/cni-plugins-amd64.tgz" -C /opt/cni/bin -xz tar -f "${BINARIES_DIR}/cri-tools/crictl-linux-amd64.tar.gz" -C /opt/bin -xz - if [ "${IS_MAIN_MASTER}" == 'true' ]; then + if [ "${IS_MAIN_CONTROL}" == 'true' ]; then set +e kubeadm upgrade apply ${UPGRADE_VERSION} -y retval=$? 
@@ -121,7 +121,7 @@ if [ -d "$BINARIES_DIR" ]; then chmod +x {kubelet,kubectl} systemctl restart kubelet - if [ "${IS_MAIN_MASTER}" == 'true' ]; then + if [ "${IS_MAIN_CONTROL}" == 'true' ]; then kubectl apply -f ${BINARIES_DIR}/network.yaml kubectl apply -f ${BINARIES_DIR}/dashboard.yaml fi diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java index 9f200d838dbd..84dd6b346ad1 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerDataAnswer.java @@ -21,26 +21,26 @@ public class GetControllerDataAnswer extends Answer { private final String _ipAddress; - private final boolean _isMaster; + private final boolean _isPrimary; public GetControllerDataAnswer(final GetControllerDataCommand cmd, - final String ipAddress, final boolean isMaster){ + final String ipAddress, final boolean isPrimary){ super(cmd); this._ipAddress = ipAddress; - this._isMaster = isMaster; + this._isPrimary = isPrimary; } public GetControllerDataAnswer(final Command command, final Exception e) { super(command, e); this._ipAddress = null; - this._isMaster = false; + this._isPrimary = false; } public String getIpAddress() { return _ipAddress; } - public boolean isMaster() { - return _isMaster; + public boolean isPrimary() { + return _isPrimary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java index e4c889cbd998..f9a49c8bb57a 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/agent/api/GetControllerHostsAnswer.java @@ 
-22,19 +22,19 @@ import com.cloud.host.HostVO; public class GetControllerHostsAnswer { - private HostVO master; - private HostVO slave; + private HostVO primary; + private HostVO secondary; - public HostVO getMaster() { - return master; + public HostVO getPrimary() { + return primary; } - public void setMaster(final HostVO master) { - this.master = master; + public void setPrimary(final HostVO primary) { + this.primary = primary; } - public HostVO getSlave() { - return slave; + public HostVO getSecondary() { + return secondary; } - public void setSlave(final HostVO slave) { - this.slave = slave; + public void setSecondary(final HostVO secondary) { + this.secondary = secondary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java index c1d10dd3086b..86396d9b02f8 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfApi.java @@ -72,7 +72,7 @@ public class BigSwitchBcfApi { private String zoneId; private Boolean nat; - private boolean isMaster; + private boolean isPrimary; private int _port = 8000; @@ -241,7 +241,7 @@ public String syncTopology(final TopologyData topo) throws BigSwitchBcfApiExcept } public ControllerData getControllerData() { - return new ControllerData(host, isMaster); + return new ControllerData(host, isPrimary); } private void checkInvariants() throws BigSwitchBcfApiException{ @@ -274,7 +274,7 @@ private String checkResponse(final HttpMethodBase m, final String errorMessageBa throw new BigSwitchBcfApiException("BCF topology sync required", true); } if (m.getStatusCode() == HttpStatus.SC_SEE_OTHER) { - isMaster = false; + isPrimary = false; set_hash(HASH_IGNORE); return HASH_IGNORE; } @@ -402,10 +402,10 @@ protected T 
executeRetrieveObject(final Type returnObjectType, } if(returnValue instanceof ControlClusterStatus) { if(HASH_CONFLICT.equals(hash)) { - isMaster = true; + isPrimary = true; ((ControlClusterStatus) returnValue).setTopologySyncRequested(true); - } else if (!HASH_IGNORE.equals(hash) && !isMaster) { - isMaster = true; + } else if (!HASH_IGNORE.equals(hash) && !isPrimary) { + isPrimary = true; ((ControlClusterStatus) returnValue).setTopologySyncRequested(true); } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java index da409df98359..449aa3c8bad7 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/BigSwitchBcfUtils.java @@ -133,10 +133,10 @@ public ControlClusterData getControlClusterData(long physicalNetworkId){ _hostDao.loadDetails(bigswitchBcfHost); GetControllerDataAnswer answer = (GetControllerDataAnswer) _agentMgr.easySend(bigswitchBcfHost.getId(), cmd); if (answer != null){ - if (answer.isMaster()) { - cluster.setMaster(bigswitchBcfHost); + if (answer.isPrimary()) { + cluster.setPrimary(bigswitchBcfHost); } else { - cluster.setSlave(bigswitchBcfHost); + cluster.setSecondary(bigswitchBcfHost); } } } @@ -471,14 +471,14 @@ public String syncTopologyToBcfHost(HostVO bigswitchBcfHost, boolean natEnabled) public BcfAnswer sendBcfCommandWithNetworkSyncCheck(BcfCommand cmd, Network network)throws IllegalArgumentException{ // get registered Big Switch controller ControlClusterData cluster = getControlClusterData(network.getPhysicalNetworkId()); - if(cluster.getMaster()==null){ + if(cluster.getPrimary()==null){ return new BcfAnswer(cmd, new CloudRuntimeException("Big Switch Network controller temporarily unavailable")); } TopologyData topo = 
getTopology(network.getPhysicalNetworkId()); cmd.setTopology(topo); - BcfAnswer answer = (BcfAnswer) _agentMgr.easySend(cluster.getMaster().getId(), cmd); + BcfAnswer answer = (BcfAnswer) _agentMgr.easySend(cluster.getPrimary().getId(), cmd); if (answer == null || !answer.getResult()) { s_logger.error ("BCF API Command failed"); @@ -487,17 +487,17 @@ public BcfAnswer sendBcfCommandWithNetworkSyncCheck(BcfCommand cmd, Network netw String newHash = answer.getHash(); if (cmd.isTopologySyncRequested()) { - newHash = syncTopologyToBcfHost(cluster.getMaster()); + newHash = syncTopologyToBcfHost(cluster.getPrimary()); } if(newHash != null){ commitTopologyHash(network.getPhysicalNetworkId(), newHash); } - HostVO slave = cluster.getSlave(); - if(slave != null){ + HostVO secondary = cluster.getSecondary(); + if(secondary != null){ TopologyData newTopo = getTopology(network.getPhysicalNetworkId()); CacheBcfTopologyCommand cacheCmd = new CacheBcfTopologyCommand(newTopo); - _agentMgr.easySend(cluster.getSlave().getId(), cacheCmd); + _agentMgr.easySend(cluster.getSecondary().getId(), cacheCmd); } return answer; diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java index 7d628994a5ba..05edbc4f5531 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControlClusterData.java @@ -22,22 +22,22 @@ import com.cloud.host.HostVO; public class ControlClusterData { - private HostVO master; - private HostVO slave; + private HostVO primary; + private HostVO secondary; - public HostVO getMaster() { - return master; + public HostVO getPrimary() { + return primary; } - public void setMaster(HostVO master) { - this.master = master; + public void setPrimary(HostVO primary) { + this.primary = 
primary; } - public HostVO getSlave() { - return slave; + public HostVO getSecondary() { + return secondary; } - public void setSlave(HostVO slave) { - this.slave = slave; + public void setSecondary(HostVO secondary) { + this.secondary = secondary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java index 224a7ab1fd72..ee16a3ce88f8 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/bigswitch/ControllerData.java @@ -21,19 +21,19 @@ public class ControllerData { private final String ipAddress; - private final boolean isMaster; + private final boolean isPrimary; - public ControllerData(String ipAddress, boolean isMaster) { + public ControllerData(String ipAddress, boolean isPrimary) { this.ipAddress = ipAddress; - this.isMaster = isMaster; + this.isPrimary = isPrimary; } public String getIpAddress() { return ipAddress; } - public boolean isMaster() { - return isMaster; + public boolean isPrimary() { + return isPrimary; } } diff --git a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java index a43cad313715..de33b8ae7b44 100644 --- a/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java +++ b/plugins/network-elements/bigswitch/src/main/java/com/cloud/network/resource/BigSwitchBcfResource.java @@ -563,7 +563,7 @@ private Answer executeRequest(GetControllerDataCommand cmd, int numRetries) { ControllerData controller = _bigswitchBcfApi.getControllerData(); return new GetControllerDataAnswer(cmd, controller.getIpAddress(), - controller.isMaster()); + controller.isPrimary()); } private 
Answer executeRequest(ReadyCommand cmd) { diff --git a/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java b/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java index 5d40e5dfa8db..207f0ab32c7b 100644 --- a/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java +++ b/plugins/network-elements/bigswitch/src/test/java/com/cloud/network/bigswitch/BigSwitchApiTest.java @@ -254,13 +254,13 @@ public void testExecuteCreateObjectConflict() throws BigSwitchBcfApiException, I } @Test - public void testExecuteCreateObjectSlave() throws BigSwitchBcfApiException, IOException { + public void testExecuteCreateObjectSecondary() throws BigSwitchBcfApiException, IOException { NetworkData network = new NetworkData(); _method = mock(PostMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_SEE_OTHER); String hash = _api.executeCreateObject(network, "/", Collections. 
emptyMap()); assertEquals(hash, BigSwitchBcfApi.HASH_IGNORE); - assertEquals(_api.getControllerData().isMaster(), false); + assertEquals(_api.getControllerData().isPrimary(), false); } @Test(expected = BigSwitchBcfApiException.class) @@ -320,7 +320,7 @@ public void testExecuteUpdateObjectConflict() throws BigSwitchBcfApiException, I } @Test - public void testExecuteUpdateObjectSlave() throws BigSwitchBcfApiException, IOException { + public void testExecuteUpdateObjectSecondary() throws BigSwitchBcfApiException, IOException { NetworkData network = new NetworkData(); _method = mock(PutMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_SEE_OTHER); @@ -396,7 +396,7 @@ public void testExecuteDeleteObjectException() throws BigSwitchBcfApiException, } @Test - public void testExecuteRetrieveControllerMasterStatus() throws BigSwitchBcfApiException, IOException { + public void testExecuteRetrieveControllerPrimaryStatus() throws BigSwitchBcfApiException, IOException { _method = mock(GetMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_OK); when(((HttpMethodBase)_method).getResponseBodyAsString(2048)).thenReturn("{'healthy': true, 'topologySyncRequested': false}"); @@ -404,11 +404,11 @@ public void testExecuteRetrieveControllerMasterStatus() throws BigSwitchBcfApiEx }.getType(), "/", null); verify(_method, times(1)).releaseConnection(); verify(_client, times(1)).executeMethod(_method); - assertEquals(_api.getControllerData().isMaster(), true); + assertEquals(_api.getControllerData().isPrimary(), true); } @Test - public void testExecuteRetrieveControllerMasterStatusWithTopoConflict() throws BigSwitchBcfApiException, IOException { + public void testExecuteRetrieveControllerPrimaryStatusWithTopoConflict() throws BigSwitchBcfApiException, IOException { _method = mock(GetMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_CONFLICT); when(((HttpMethodBase)_method).getResponseBodyAsString(2048)).thenReturn("{'healthy': true, 
'topologySyncRequested': true}"); @@ -416,11 +416,11 @@ public void testExecuteRetrieveControllerMasterStatusWithTopoConflict() throws B }.getType(), "/", null); verify(_method, times(1)).releaseConnection(); verify(_client, times(1)).executeMethod(_method); - assertEquals(_api.getControllerData().isMaster(), true); + assertEquals(_api.getControllerData().isPrimary(), true); } @Test - public void testExecuteRetrieveControllerSlaveStatus() throws BigSwitchBcfApiException, IOException { + public void testExecuteRetrieveControllerSecondaryStatus() throws BigSwitchBcfApiException, IOException { _method = mock(GetMethod.class); when(_method.getStatusCode()).thenReturn(HttpStatus.SC_SEE_OTHER); when(((HttpMethodBase)_method).getResponseBodyAsString(1024)).thenReturn("{'healthy': true, 'topologySyncRequested': false}"); @@ -428,6 +428,6 @@ public void testExecuteRetrieveControllerSlaveStatus() throws BigSwitchBcfApiExc }.getType(), "/", null); verify(_method, times(1)).releaseConnection(); verify(_client, times(1)).executeMethod(_method); - assertEquals(_api.getControllerData().isMaster(), false); + assertEquals(_api.getControllerData().isPrimary(), false); } } diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java index e6f3b4dfd1cc..3bf5bba9c43a 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/management/ServiceManagerImpl.java @@ -79,7 +79,7 @@ public class ServiceManagerImpl implements ServiceManager { ContrailManager _manager; /** - * In the case of service instance the master object is in the contrail API server. 
This object stores the + * In the case of service instance the primary object is in the contrail API server. This object stores the * service instance parameters in the database. * * @param owner Used to determine the project. diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java index acfff7de291b..f829d3c45297 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ModelObject.java @@ -33,7 +33,7 @@ * * The object constructor should set the uuid and the internal id of the cloudstack objects. * - * The build method reads the master database (typically cloudstack mysql) and derives the state that + * The build method reads the primary database (typically cloudstack mysql) and derives the state that * we wish to reflect in the contrail API. This method should not modify the Contrail API state. * * The verify method reads the API server state and compares with cached properties. diff --git a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java index e79053ca4f33..7a074a0036cb 100644 --- a/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java +++ b/plugins/network-elements/juniper-contrail/src/main/java/org/apache/cloudstack/network/contrail/model/ServiceInstanceModel.java @@ -110,7 +110,7 @@ public String getName() { } /** - * Recreate the model object from the Contrail API which is the master for this type of object. 
+ * Recreate the model object from the Contrail API which is the primary for this type of object. * @param siObj */ public void build(ModelController controller, ServiceInstance siObj) { diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java index f9a478713c74..3ad36acc1601 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/NetworkProviderTest.java @@ -116,7 +116,7 @@ public class NetworkProviderTest extends TestCase { private ApiConnector _api; private static int s_mysqlSrverPort; private static long s_msId; - private static Merovingian2 s_lockMaster; + private static Merovingian2 s_lockController; public static boolean s_initDone = false; @BeforeClass @@ -127,14 +127,14 @@ public static void globalSetUp() throws Exception { s_logger.info("mysql server launched on port " + s_mysqlSrverPort); s_msId = ManagementServerNode.getManagementServerId(); - s_lockMaster = Merovingian2.createLockMaster(s_msId); + s_lockController = Merovingian2.createLockController(s_msId); } @AfterClass public static void globalTearDown() throws Exception { - s_lockMaster.cleanupForServer(s_msId); + s_lockController.cleanupForServer(s_msId); JmxUtil.unregisterMBean("Locks", "Locks"); - s_lockMaster = null; + s_lockController = null; AbstractApplicationContext ctx = (AbstractApplicationContext)ComponentContext.getApplicationContext(); Map lifecycleComponents = ctx.getBeansOfType(ComponentLifecycle.class); diff --git a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java
b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java index c85bc659e31e..9564ec0a24ab 100644 --- a/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java +++ b/plugins/network-elements/juniper-contrail/src/test/java/org/apache/cloudstack/network/contrail/management/PublicNetworkTest.java @@ -70,7 +70,7 @@ public class PublicNetworkTest extends TestCase { private static boolean s_initDone = false; private static int s_mysqlServerPort; private static long s_msId; - private static Merovingian2 s_lockMaster; + private static Merovingian2 s_lockController; private ManagementServerMock _server; private ApiConnector _spy; @@ -81,14 +81,14 @@ public static void globalSetUp() throws Exception { s_mysqlServerPort = TestDbSetup.init(null); s_logger.info("mysql server launched on port " + s_mysqlServerPort); s_msId = ManagementServerNode.getManagementServerId(); - s_lockMaster = Merovingian2.createLockMaster(s_msId); + s_lockController = Merovingian2.createLockController(s_msId); } @AfterClass public static void globalTearDown() throws Exception { - s_lockMaster.cleanupForServer(s_msId); + s_lockController.cleanupForServer(s_msId); JmxUtil.unregisterMBean("Locks", "Locks"); - s_lockMaster = null; + s_lockController = null; AbstractApplicationContext ctx = (AbstractApplicationContext)ComponentContext.getApplicationContext(); Map lifecycleComponents = ctx.getBeansOfType(ComponentLifecycle.class); diff --git a/python/lib/cloud_utils.py b/python/lib/cloud_utils.py index be908281be2d..ecef6f46d97e 100644 --- a/python/lib/cloud_utils.py +++ b/python/lib/cloud_utils.py @@ -1161,7 +1161,7 @@ class MigrationStep: You develop your own steps, and then pass a list of those steps to the Migrator instance that will run them in order. 
- When the migrator runs, it will take the list of steps you gave him, + When the migrator runs, it will take the list of steps you gave, and, for each step: a) instantiate it, passing the context you gave to the migrator diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 5c5ba55f1972..a7968cc7f3a1 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -1946,7 +1946,7 @@ public AsyncJobResponse queryJobResult(final QueryAsyncJobResultCmd cmd) { //check permissions if (_accountMgr.isNormalUser(caller.getId())) { - //regular user can see only jobs he owns + //regular users can see only jobs they own if (caller.getId() != jobOwner.getId()) { throw new PermissionDeniedException("Account " + caller + " is not authorized to see job id=" + job.getId()); } diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index f204ead3055a..ae8212034ed5 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -3746,10 +3746,10 @@ public DetailOptionsResponse listDetailOptions(final ListDetailOptionsCmd cmd) { throw new CloudRuntimeException("Resource type not supported."); } if (CallContext.current().getCallingAccount().getType() != Account.ACCOUNT_TYPE_ADMIN) { - final List userBlacklistedSettings = Stream.of(QueryService.UserVMBlacklistedDetails.value().split(",")) + final List userDenyListedSettings = Stream.of(QueryService.UserVMDeniedDetails.value().split(",")) .map(item -> (item).trim()) .collect(Collectors.toList()); - for (final String detail : userBlacklistedSettings) { + for (final String detail : userDenyListedSettings) { if (options.containsKey(detail)) { options.remove(detail); } @@ -4149,6 +4149,6 @@ public String 
getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {AllowUserViewDestroyedVM, UserVMBlacklistedDetails, UserVMReadOnlyDetails, SortKeyAscending, AllowUserViewAllDomainAccounts}; + return new ConfigKey[] {AllowUserViewDestroyedVM, UserVMDeniedDetails, UserVMReadOnlyDetails, SortKeyAscending, AllowUserViewAllDomainAccounts}; } } diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 3d75ff7f1604..09eaee356e7e 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -344,9 +344,9 @@ public UserVmResponse newUserVmResponse(ResponseView view, String objectName, Us userVmResponse.setPoolType(userVm.getPoolType().toString()); } - // Remove blacklisted settings if user is not admin + // Remove deny listed settings if user is not admin if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - String[] userVmSettingsToHide = QueryService.UserVMBlacklistedDetails.value().split(","); + String[] userVmSettingsToHide = QueryService.UserVMDeniedDetails.value().split(","); for (String key : userVmSettingsToHide) { resourceDetails.remove(key.trim()); } diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index f5de35af3ed2..6e2138628b15 100755 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -1021,7 +1021,7 @@ private String validateConfigurationValue(final String name, String value, final if (route != null) { final String routeToVerify = route.trim(); if (!NetUtils.isValidIp4Cidr(routeToVerify)) { - throw new InvalidParameterValueException("Invalid value for blacklisted route: " + route + ". 
Valid format is list" + throw new InvalidParameterValueException("Invalid value for route: " + route + " in deny list. Valid format is list" + " of cidrs separated by coma. Example: 10.1.1.0/24,192.168.0.0/24"); } } @@ -3765,7 +3765,7 @@ public NetUtils.SupersetOrSubset checkIfSubsetOrSuperset(String vlanGateway, Str if (newVlanGateway == null && newVlanNetmask == null) { newVlanGateway = vlanGateway; newVlanNetmask = vlanNetmask; - // this means he is trying to add to the existing subnet. + // this means we are trying to add to the existing subnet. if (NetUtils.sameSubnet(newStartIP, newVlanGateway, newVlanNetmask)) { if (NetUtils.sameSubnet(newEndIP, newVlanGateway, newVlanNetmask)) { return NetUtils.SupersetOrSubset.sameSubnet; @@ -3840,7 +3840,7 @@ public boolean hasSameSubnet(boolean ipv4, String vlanGateway, String vlanNetmas // this implies the user is trying to add a new subnet // which is not a superset or subset of this subnet. } else if (val == NetUtils.SupersetOrSubset.isSubset) { - // this means he is trying to add to the same subnet. + // this means we are trying to add to the same subnet. 
throw new InvalidParameterValueException("The subnet you are trying to add is a subset of the existing subnet having gateway " + vlanGateway + " and netmask " + vlanNetmask); } else if (val == NetUtils.SupersetOrSubset.sameSubnet) { diff --git a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java index dbcfe4d1b17f..f10dde6ac71e 100644 --- a/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java +++ b/server/src/main/java/com/cloud/deploy/FirstFitPlanner.java @@ -297,7 +297,7 @@ private List scanPodsForDestination(VirtualMachineProfile vmProfile, Deplo private Map getCapacityThresholdMap() { // Lets build this real time so that the admin wont have to restart MS - // if he changes these values + // if anyone changes these values Map disableThresholdMap = new HashMap(); String cpuDisableThresholdString = ClusterCPUCapacityDisableThreshold.value().toString(); diff --git a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java index 8dd9c9b6d6fb..e05bc5cb5bc7 100644 --- a/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/main/java/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -90,7 +90,7 @@ * state. If a Investigator finds the VM is dead, then HA process is started on the VM, skipping step 2. 2. If the list of * Investigators can not determine if the VM is dead or alive. The list of FenceBuilders is invoked to fence off the VM so that * it won't do any damage to the storage and network. 3. The VM is marked as stopped. 4. The VM is started again via the normal - * process of starting VMs. Note that once the VM is marked as stopped, the user may have started the VM himself. 5. VMs that + * process of starting VMs. Note that once the VM is marked as stopped, the user may have started the VM explicitly. 5. 
VMs that * have re-started more than the configured number of times are marked as in Error state and the user is not allowed to restart * the VM. * diff --git a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java index bbc6aa77b790..9191ddddb5b9 100644 --- a/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/main/java/com/cloud/network/element/VirtualRouterElement.java @@ -617,7 +617,7 @@ public List getRouters(Network network){ } NetworkDetailVO updateInSequence=_networkDetailsDao.findDetail(network.getId(), Network.updatingInSequence); if(network.isRedundant() && updateInSequence!=null && "true".equalsIgnoreCase(updateInSequence.getValue())){ - List masterRouters=new ArrayList(); + List primaryRouters=new ArrayList(); int noOfrouters=routers.size(); while (noOfrouters>0){ DomainRouterVO router = routers.get(0); @@ -632,16 +632,16 @@ public List getRouters(Network network){ continue; } if(router.getRedundantState()!=VirtualRouter.RedundantState.BACKUP) { - masterRouters.add(router); + primaryRouters.add(router); routers.remove(router); } noOfrouters--; } - if(routers.size()==0 && masterRouters.size()==0){ + if(routers.size()==0 && primaryRouters.size()==0){ return null; } - if(routers.size()==0 && masterRouters.size()!=0){ - routers=masterRouters; + if(routers.size()==0 && primaryRouters.size()!=0){ + routers=primaryRouters; } routers=routers.subList(0,1); routers.get(0).setUpdateState(VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index ebcf16afe3c1..7ff911393f1a 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ 
b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -805,7 +805,7 @@ protected void updateSite2SiteVpnConnectionState(final List rout if (conns == null || conns.isEmpty()) { continue; } - if (router.getIsRedundantRouter() && router.getRedundantState() != RedundantState.MASTER){ + if (router.getIsRedundantRouter() && router.getRedundantState() != RedundantState.PRIMARY){ continue; } if (router.getState() != VirtualMachine.State.Running) { @@ -935,7 +935,7 @@ protected void updateRoutersRedundantState(final List routers) { final String context = "Redundant virtual router (name: " + router.getHostName() + ", id: " + router.getId() + ") " + " just switch from " + prevState + " to " + currState; s_logger.info(context); - if (currState == RedundantState.MASTER) { + if (currState == RedundantState.PRIMARY) { _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); } } @@ -943,12 +943,12 @@ protected void updateRoutersRedundantState(final List routers) { } // Ensure router status is update to date before execute this function. 
The - // function would try best to recover all routers except MASTER - protected void recoverRedundantNetwork(final DomainRouterVO masterRouter, final DomainRouterVO backupRouter) { - if (masterRouter.getState() == VirtualMachine.State.Running && backupRouter.getState() == VirtualMachine.State.Running) { - final HostVO masterHost = _hostDao.findById(masterRouter.getHostId()); + // function would try best to recover all routers except PRIMARY + protected void recoverRedundantNetwork(final DomainRouterVO primaryRouter, final DomainRouterVO backupRouter) { + if (primaryRouter.getState() == VirtualMachine.State.Running && backupRouter.getState() == VirtualMachine.State.Running) { + final HostVO primaryHost = _hostDao.findById(primaryRouter.getHostId()); final HostVO backupHost = _hostDao.findById(backupRouter.getHostId()); - if (masterHost.getState() == Status.Up && backupHost.getState() == Status.Up) { + if (primaryHost.getState() == Status.Up && backupHost.getState() == Status.Up) { final String title = "Reboot " + backupRouter.getInstanceName() + " to ensure redundant virtual routers work"; if (s_logger.isDebugEnabled()) { s_logger.debug(title); @@ -971,7 +971,7 @@ protected class RvRStatusUpdateTask extends ManagedContextRunnable { /* * In order to make fail-over works well at any time, we have to ensure: - * 1. Backup router's priority = Master's priority - DELTA + 1 + * 1. 
Backup router's priority = Primary's priority - DELTA + 1 */ private void checkSanity(final List routers) { final Set checkedNetwork = new HashSet(); @@ -1000,16 +1000,16 @@ private void checkSanity(final List routers) { continue; } - DomainRouterVO masterRouter = null; + DomainRouterVO primaryRouter = null; DomainRouterVO backupRouter = null; for (final DomainRouterVO r : checkingRouters) { - if (r.getRedundantState() == RedundantState.MASTER) { - if (masterRouter == null) { - masterRouter = r; + if (r.getRedundantState() == RedundantState.PRIMARY) { + if (primaryRouter == null) { + primaryRouter = r; } else { // Wilder Rodrigues (wrodrigues@schubergphilis.com // Force a restart in order to fix the conflict - // recoverRedundantNetwork(masterRouter, r); + // recoverRedundantNetwork(primaryRouter, r); break; } } else if (r.getRedundantState() == RedundantState.BACKUP) { @@ -1027,7 +1027,7 @@ private void checkSanity(final List routers) { } } - private void checkDuplicateMaster(final List routers) { + private void checkDuplicatePrimary(final List routers) { final Map networkRouterMaps = new HashMap(); for (final DomainRouterVO router : routers) { final List routerGuestNtwkIds = _routerDao.getRouterNetworks(router.getId()); @@ -1035,13 +1035,13 @@ private void checkDuplicateMaster(final List routers) { final Long vpcId = router.getVpcId(); if (vpcId != null || routerGuestNtwkIds.size() > 0) { Long routerGuestNtwkId = vpcId != null ? vpcId : routerGuestNtwkIds.get(0); - if (router.getRedundantState() == RedundantState.MASTER) { + if (router.getRedundantState() == RedundantState.PRIMARY) { if (networkRouterMaps.containsKey(routerGuestNtwkId)) { final DomainRouterVO dupRouter = networkRouterMaps.get(routerGuestNtwkId); - final String title = "More than one redundant virtual router is in MASTER state! Router " + router.getHostName() + " and router " + final String title = "More than one redundant virtual router is in PRIMARY state! 
Router " + router.getHostName() + " and router " + dupRouter.getHostName(); final String context = "Virtual router (name: " + router.getHostName() + ", id: " + router.getId() + " and router (name: " + dupRouter.getHostName() - + ", id: " + router.getId() + ") are both in MASTER state! If the problem persist, restart both of routers. "; + + ", id: " + router.getId() + ") are both in PRIMARY state! If the problem persist, restart both of routers. "; _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), title, context); s_logger.warn(context); } else { @@ -1083,7 +1083,7 @@ protected void runInContext() { updateRoutersRedundantState(routers); // Wilder Rodrigues (wrodrigues@schubergphilis.com) - One of the routers is not running, // so we don't have to continue here since the host will be null any way. Also, there is no need - // To check either for sanity of duplicate master. Thus, just update the state and get lost. + // To check either for sanity of duplicate primary. Thus, just update the state and get lost. continue; } @@ -1104,7 +1104,7 @@ protected void runInContext() { continue; } updateRoutersRedundantState(routers); - checkDuplicateMaster(routers); + checkDuplicatePrimary(routers); checkSanity(routers); } catch (final Exception ex) { s_logger.error("Fail to complete the RvRStatusUpdateTask! 
", ex); @@ -2231,13 +2231,13 @@ protected StringBuilder createRedundantRouterArgs(final NicProfile nic, final Do String redundantState = RedundantState.BACKUP.toString(); router.setRedundantState(RedundantState.BACKUP); if (routers.size() == 0) { - redundantState = RedundantState.MASTER.toString(); - router.setRedundantState(RedundantState.MASTER); + redundantState = RedundantState.PRIMARY.toString(); + router.setRedundantState(RedundantState.PRIMARY); } else { final DomainRouterVO router0 = routers.get(0); if (router.getId() == router0.getId()) { - redundantState = RedundantState.MASTER.toString(); - router.setRedundantState(RedundantState.MASTER); + redundantState = RedundantState.PRIMARY.toString(); + router.setRedundantState(RedundantState.PRIMARY); } } diff --git a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java index a03e3deb1030..469196802fce 100644 --- a/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/main/java/com/cloud/network/vpc/VpcManagerImpl.java @@ -2293,9 +2293,9 @@ public StaticRoute createStaticRoute(final long gatewayId, final String cidr) th throw new InvalidParameterValueException("CIDR should be outside of link local cidr " + NetUtils.getLinkLocalCIDR()); } - // 3) Verify against blacklisted routes - if (isCidrBlacklisted(cidr, vpc.getZoneId())) { - throw new InvalidParameterValueException("The static gateway cidr overlaps with one of the blacklisted routes of the zone the VPC belongs to"); + // 3) Verify against denied routes + if (isCidrDenylisted(cidr, vpc.getZoneId())) { + throw new InvalidParameterValueException("The static gateway cidr overlaps with one of the denied routes of the zone the VPC belongs to"); } return Transaction.execute(new TransactionCallbackWithException() { @@ -2317,14 +2317,14 @@ public StaticRouteVO doInTransaction(final TransactionStatus status) throws Netw }); } - protected boolean isCidrBlacklisted(final 
String cidr, final long zoneId) { + protected boolean isCidrDenylisted(final String cidr, final long zoneId) { final String routesStr = NetworkOrchestrationService.GuestDomainSuffix.valueIn(zoneId); if (routesStr != null && !routesStr.isEmpty()) { - final String[] cidrBlackList = routesStr.split(","); + final String[] cidrDenyList = routesStr.split(","); - if (cidrBlackList != null && cidrBlackList.length > 0) { - for (final String blackListedRoute : cidrBlackList) { - if (NetUtils.isNetworksOverlap(blackListedRoute, cidr)) { + if (cidrDenyList != null && cidrDenyList.length > 0) { + for (final String denyListedRoute : cidrDenyList) { + if (NetUtils.isNetworksOverlap(denyListedRoute, cidr)) { return true; } } diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index bd4e96f6aa17..af7c2a2acb85 100644 --- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -714,8 +714,8 @@ public ResourceLimitVO updateResourceLimit(Long accountId, Long domainId, Intege } if ((caller.getAccountId() == accountId.longValue()) && (_accountMgr.isDomainAdmin(caller.getId()) || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN)) { - // If the admin is trying to update his own account, disallow. - throw new PermissionDeniedException("Unable to update resource limit for his own account " + accountId + ", permission denied"); + // If the admin is trying to update their own account, disallow. 
+ throw new PermissionDeniedException("Unable to update resource limit for their own account " + accountId + ", permission denied"); } if (account.getType() == Account.ACCOUNT_TYPE_PROJECT) { diff --git a/server/src/main/java/com/cloud/server/LockMasterListener.java b/server/src/main/java/com/cloud/server/LockControllerListener.java similarity index 83% rename from server/src/main/java/com/cloud/server/LockMasterListener.java rename to server/src/main/java/com/cloud/server/LockControllerListener.java index 27cf74f4375b..ba555b31343f 100644 --- a/server/src/main/java/com/cloud/server/LockMasterListener.java +++ b/server/src/main/java/com/cloud/server/LockControllerListener.java @@ -26,11 +26,11 @@ * when a management server is down. * */ -public class LockMasterListener implements ClusterManagerListener { - Merovingian2 _lockMaster; +public class LockControllerListener implements ClusterManagerListener { + Merovingian2 _lockController; - public LockMasterListener(long msId) { - _lockMaster = Merovingian2.createLockMaster(msId); + public LockControllerListener(long msId) { + _lockController = Merovingian2.createLockController(msId); } @Override @@ -40,7 +40,7 @@ public void onManagementNodeJoined(List nodeList @Override public void onManagementNodeLeft(List nodeList, long selfNodeId) { for (ManagementServerHost node : nodeList) { - _lockMaster.cleanupForServer(node.getMsid()); + _lockController.cleanupForServer(node.getMsid()); } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 2e7c8fe49a18..98937cad0e36 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -879,7 +879,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private VpcDao _vpcDao; - private LockMasterListener _lockMasterListener; + private LockControllerListener 
_lockControllerListener; private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker")); @@ -985,11 +985,11 @@ public boolean start() { // Set human readable sizes NumbersUtil.enableHumanReadableSizes = _configDao.findByName("display.human.readable.sizes").getValue().equals("true"); - if (_lockMasterListener == null) { - _lockMasterListener = new LockMasterListener(ManagementServerNode.getManagementServerId()); + if (_lockControllerListener == null) { + _lockControllerListener = new LockControllerListener(ManagementServerNode.getManagementServerId()); } - _clusterMgr.registerListener(_lockMasterListener); + _clusterMgr.registerListener(_lockControllerListener); enableAdminUser("password"); return true; @@ -3815,7 +3815,7 @@ public ArrayList getCloudIdentifierResponse(final long userId) { String signature = ""; try { - // get the user obj to get his secret key + // get the user obj to get their secret key user = _accountMgr.getActiveUser(userId); final String secretKey = user.getSecretKey(); final String input = cloudIdentifier; @@ -4551,12 +4551,12 @@ public void setStoragePoolAllocators(final List storagePoo _storagePoolAllocators = storagePoolAllocators; } - public LockMasterListener getLockMasterListener() { - return _lockMasterListener; + public LockControllerListener getLockControllerListener() { + return _lockControllerListener; } - public void setLockMasterListener(final LockMasterListener lockMasterListener) { - _lockMasterListener = lockMasterListener; + public void setLockControllerListener(final LockControllerListener lockControllerListener) { + _lockControllerListener = lockControllerListener; } } diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index 
054c3342204f..b6f4e5e36000 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -581,7 +581,7 @@ public void checkAccess(Account caller, AccessType accessType, boolean sameOwner @Override public Long checkAccessAndSpecifyAuthority(Account caller, Long zoneId) { - // We just care for resource domain admin for now. He should be permitted to see only his zone. + // We just care for resource domain admins for now, and they should be permitted to see only their zone. if (isResourceDomainAdmin(caller.getAccountId())) { if (zoneId == null) { return getZoneIdForAccount(caller); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 579a33d6ee80..d9dc32d673fb 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -2545,7 +2545,6 @@ protected void runInContext() { scanLock.releaseRef(); } } - } @Override @@ -2581,7 +2580,7 @@ public UserVm updateVirtualMachine(UpdateVMCmd cmd) throws ResourceUnavailableEx updateDisplayVmFlag(isDisplayVm, id, vmInstance); } final Account caller = CallContext.current().getCallingAccount(); - final List userBlacklistedSettings = Stream.of(QueryService.UserVMBlacklistedDetails.value().split(",")) + final List userDenyListedSettings = Stream.of(QueryService.UserVMDeniedDetails.value().split(",")) .map(item -> (item).trim()) .collect(Collectors.toList()); final List userReadOnlySettings = Stream.of(QueryService.UserVMReadOnlyDetails.value().split(",")) @@ -2592,7 +2591,7 @@ public UserVm updateVirtualMachine(UpdateVMCmd cmd) throws ResourceUnavailableEx userVmDetailsDao.removeDetails(id); } else { for (final UserVmDetailVO detail : userVmDetailsDao.listDetails(id)) { - if (detail != null && !userBlacklistedSettings.contains(detail.getName()) + if (detail != null && 
!userDenyListedSettings.contains(detail.getName()) && !userReadOnlySettings.contains(detail.getName())) { userVmDetailsDao.removeDetail(id, detail.getName()); } @@ -2605,18 +2604,18 @@ public UserVm updateVirtualMachine(UpdateVMCmd cmd) throws ResourceUnavailableEx } if (caller != null && caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { - // Ensure blacklisted or read-only detail is not passed by non-root-admin user + // Ensure denied or read-only detail is not passed by non-root-admin user for (final String detailName : details.keySet()) { - if (userBlacklistedSettings.contains(detailName)) { + if (userDenyListedSettings.contains(detailName)) { throw new InvalidParameterValueException("You're not allowed to add or edit the restricted setting: " + detailName); } if (userReadOnlySettings.contains(detailName)) { throw new InvalidParameterValueException("You're not allowed to add or edit the read-only setting: " + detailName); } } - // Add any hidden/blacklisted or read-only detail + // Add any hidden/denied or read-only detail for (final UserVmDetailVO detail : userVmDetailsDao.listDetails(id)) { - if (userBlacklistedSettings.contains(detail.getName()) || userReadOnlySettings.contains(detail.getName())) { + if (userDenyListedSettings.contains(detail.getName()) || userReadOnlySettings.contains(detail.getName())) { details.put(detail.getName(), detail.getValue()); } } @@ -5569,7 +5568,7 @@ protected boolean isValidXenOrVmwareConfiguration(String cfg, String[] allowedKe * @param vm */ protected void persistExtraConfigKvm(String decodedUrl, UserVm vm) { - // validate config against blacklisted cfg commands + // validate config against denied cfg commands validateKvmExtraConfig(decodedUrl); String[] extraConfigs = decodedUrl.split("\n\n"); for (String cfg : extraConfigs) { @@ -5591,7 +5590,7 @@ protected void persistExtraConfigKvm(String decodedUrl, UserVm vm) { /** * This method is called by the persistExtraConfigKvm - * Validates passed extra configuration data for KVM 
and validates against blacklist of unwanted commands + * Validates passed extra configuration data for KVM and validates against deny-list of unwanted commands * controlled by Root admin * @param decodedUrl string containing xml configuration to be validated */ diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index 672f7c0d4144..e9905c52d930 100644 --- a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -51,7 +51,7 @@ - + - + diff --git a/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java b/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java index 7c2a3c781785..1b5343881b8b 100644 --- a/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java +++ b/server/src/test/java/com/cloud/network/element/VirtualRouterElementTest.java @@ -250,7 +250,7 @@ public void testGetRouters1(){ public void testGetRouters2(){ Network networkUpdateInprogress=new NetworkVO(2l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); mockDAOs((NetworkVO)networkUpdateInprogress,testOffering); - //alwyas return backup routers first when both master and backup need update. + //always return backup routers first when both primary and backup need update. 
List routers=virtualRouterElement.getRouters(networkUpdateInprogress); assertTrue(routers.size()==1); assertTrue(routers.get(0).getRedundantState()==RedundantState.BACKUP && routers.get(0).getUpdateState()==VirtualRouter.UpdateState.UPDATE_IN_PROGRESS); @@ -260,7 +260,7 @@ public void testGetRouters2(){ public void testGetRouters3(){ Network network=new NetworkVO(3l,null,null,null,1l,1l,1l,1l,"d","d","d",null,1l,1l,null,true,null,true); mockDAOs((NetworkVO)network,testOffering); - //alwyas return backup routers first when both master and backup need update. + //always return backup routers first when both primary and backup need update. List routers=virtualRouterElement.getRouters(network); assertTrue(routers.size()==4); } @@ -376,7 +376,7 @@ private void mockDAOs(final NetworkVO network, final NetworkOfferingVO offering) /* stopPending */ false, /* vpcId */ null); routerNeedUpdateBackup.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); - final DomainRouterVO routerNeedUpdateMaster = new DomainRouterVO(/* id */ 3L, + final DomainRouterVO routerNeedUpdatePrimary = new DomainRouterVO(/* id */ 3L, /* serviceOfferingId */ 1L, /* elementId */ 0L, "name", @@ -387,11 +387,11 @@ private void mockDAOs(final NetworkVO network, final NetworkOfferingVO offering) /* accountId */ 1L, /* userId */ 1L, /* isRedundantRouter */ false, - RedundantState.MASTER, + RedundantState.PRIMARY, /* haEnabled */ false, /* stopPending */ false, /* vpcId */ null); - routerNeedUpdateMaster.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); + routerNeedUpdatePrimary.setUpdateState(VirtualRouter.UpdateState.UPDATE_NEEDED); final DomainRouterVO routerUpdateComplete = new DomainRouterVO(/* id */ 4L, /* serviceOfferingId */ 1L, /* elementId */ 0L, @@ -427,12 +427,12 @@ private void mockDAOs(final NetworkVO network, final NetworkOfferingVO offering) List routerList1=new ArrayList<>(); routerList1.add(routerUpdateComplete); routerList1.add(routerNeedUpdateBackup); - 
routerList1.add(routerNeedUpdateMaster); + routerList1.add(routerNeedUpdatePrimary); routerList1.add(routerUpdateInProgress); List routerList2=new ArrayList<>(); routerList2.add(routerUpdateComplete); routerList2.add(routerNeedUpdateBackup); - routerList2.add(routerNeedUpdateMaster); + routerList2.add(routerNeedUpdatePrimary); List routerList3=new ArrayList<>(); routerList3.add(routerUpdateComplete); routerList3.add(routerUpdateInProgress); diff --git a/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java b/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java index aed4769ab964..7b2906f2a1a9 100644 --- a/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/router/VirtualNetworkApplianceManagerImplTest.java @@ -263,7 +263,7 @@ public void testDeployRouterNotRedundant() throws Exception { @Test public void testUpdateSite2SiteVpnConnectionState() throws Exception{ - DomainRouterVO router = new DomainRouterVO(1L, 1L, 1L, "First testing router", 1L, Hypervisor.HypervisorType.XenServer, 1L, 1L, 1L, 1L, false, VirtualRouter.RedundantState.MASTER, true, true, 1L); + DomainRouterVO router = new DomainRouterVO(1L, 1L, 1L, "First testing router", 1L, Hypervisor.HypervisorType.XenServer, 1L, 1L, 1L, 1L, false, VirtualRouter.RedundantState.PRIMARY, true, true, 1L); router.setState(VirtualMachine.State.Running); router.setPrivateIpAddress("192.168.50.15"); diff --git a/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js b/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js index a69b8a592fe0..be5d0a9675b9 100644 --- a/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js +++ b/systemvm/agent/noVNC/vendor/pako/lib/zlib/trees.js @@ -951,9 +951,9 @@ function send_all_trees(s, lcodes, dcodes, blcodes) * Check if the data type is TEXT or BINARY, using the following algorithm: * - TEXT if the two conditions below are satisfied: * a) 
There are no non-portable control characters belonging to the - * "black list" (0..6, 14..25, 28..31). + * "deny list" (0..6, 14..25, 28..31). * b) There is at least one printable character belonging to the - * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). + * "allow list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). * - BINARY otherwise. * - The following partially-portable control characters form a * "gray list" that is ignored in this detection algorithm: @@ -961,21 +961,21 @@ function send_all_trees(s, lcodes, dcodes, blcodes) * IN assertion: the fields Freq of dyn_ltree are set. */ function detect_data_type(s) { - /* black_mask is the bit mask of black-listed bytes + /* deny_mask is the bit mask of deny-listed bytes * set bits 0..6, 14..25, and 28..31 * 0xf3ffc07f = binary 11110011111111111100000001111111 */ - var black_mask = 0xf3ffc07f; + var deny_mask = 0xf3ffc07f; var n; - /* Check for non-textual ("black-listed") bytes. */ - for (n = 0; n <= 31; n++, black_mask >>>= 1) { - if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { + /* Check for non-textual ("deny-listed") bytes. */ + for (n = 0; n <= 31; n++, deny_mask >>>= 1) { + if ((deny_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) { return Z_BINARY; } } - /* Check for textual ("white-listed") bytes. */ + /* Check for textual ("allow-listed") bytes. */ if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) { return Z_TEXT; @@ -986,7 +986,7 @@ function detect_data_type(s) { } } - /* There are no "black-listed" or "white-listed" bytes: + /* There are no "deny-listed" or "allow-listed" bytes: * this stream either is empty or has tolerated ("gray-listed") bytes only. 
*/ return Z_BINARY; diff --git a/systemvm/debian/opt/cloud/bin/checkrouter.sh b/systemvm/debian/opt/cloud/bin/checkrouter.sh index c0d2ea770190..ae3aff7eb10c 100755 --- a/systemvm/debian/opt/cloud/bin/checkrouter.sh +++ b/systemvm/debian/opt/cloud/bin/checkrouter.sh @@ -27,13 +27,13 @@ fi ROUTER_TYPE=$(cat /etc/cloudstack/cmdline.json | grep type | awk '{print $2;}' | sed -e 's/[,\"]//g') if [ "$ROUTER_TYPE" = "router" ] then - ROUTER_STATE=$(ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "MASTER"; else echo "BACKUP"; fi') + ROUTER_STATE=$(ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "PRIMARY"; else echo "BACKUP"; fi') STATUS=$ROUTER_STATE else ROUTER_STATE=$(ip addr show dev eth1 | grep state | awk '{print $9;}') if [ "$ROUTER_STATE" = "UP" ] then - STATUS=MASTER + STATUS=PRIMARY elif [ "$ROUTER_STATE" = "DOWN" ] then STATUS=BACKUP diff --git a/systemvm/debian/opt/cloud/bin/master.py b/systemvm/debian/opt/cloud/bin/configure_router.py similarity index 87% rename from systemvm/debian/opt/cloud/bin/master.py rename to systemvm/debian/opt/cloud/bin/configure_router.py index 26de8b93bb39..8d1f790e4722 100755 --- a/systemvm/debian/opt/cloud/bin/master.py +++ b/systemvm/debian/opt/cloud/bin/configure_router.py @@ -25,9 +25,9 @@ from optparse import OptionParser parser = OptionParser() -parser.add_option("-m", "--master", - action="store_true", default=False, dest="master", - help="Set router master") +parser.add_option("-p", "--primary", + action="store_true", default=False, dest="primary", + help="Set router primary") parser.add_option("-b", "--backup", action="store_true", default=False, dest="backup", help="Set router backup") @@ -42,15 +42,15 @@ format=config.get_format()) config.cmdline() cl = CsCmdLine("cmdline", config) -# Update the configuration to set state as backup and let keepalived decide who the real Master is! 
-cl.set_master_state(False) +# Update the configuration to set state as backup and let keepalived decide who the real Primary is! +cl.set_primary_state(False) cl.save() config.set_address() red = CsRedundant(config) -if options.master: - red.set_master() +if options.primary: + red.set_primary() if options.backup: red.set_backup() diff --git a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py index be0c521cd031..0bc5d44ac531 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsAddress.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsAddress.py @@ -608,13 +608,13 @@ def post_config_change(self, method): app.setup() # If redundant then this is dealt with - # by the master backup functions + # by the primary backup functions if not cmdline.is_redundant(): if method == "add": CsPasswdSvc(self.address['public_ip']).start() elif method == "delete": CsPasswdSvc(self.address['public_ip']).stop() - elif cmdline.is_master(): + elif cmdline.is_primary(): if method == "add": CsPasswdSvc(self.get_gateway() + "," + self.address['public_ip']).start() elif method == "delete": diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py index adb9a1aae744..aa738dfe805c 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDatabag.py @@ -103,23 +103,23 @@ def get_eth2_ip(self): else: return "unknown" - def is_master(self): + def is_primary(self): if not self.is_redundant(): return False if "redundant_state" in self.idata(): - return self.idata()['redundant_state'] == "MASTER" + return self.idata()['redundant_state'] == "PRIMARY" return False def set_fault_state(self): self.idata()['redundant_state'] = "FAULT" - self.idata()['redundant_master'] = False + self.idata()['redundant_primary'] = False - def set_master_state(self, value): + def set_primary_state(self, value): if value: - self.idata()['redundant_state'] = "MASTER" + self.idata()['redundant_state'] = 
"PRIMARY" else: self.idata()['redundant_state'] = "BACKUP" - self.idata()['redundant_master'] = value + self.idata()['redundant_primary'] = value def get_router_id(self): if "router_id" in self.idata(): diff --git a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py index 91b95c6c676a..01d2c46b8ee3 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsDhcp.py @@ -71,7 +71,7 @@ def process(self): self.write_hosts() - if not self.cl.is_redundant() or self.cl.is_master(): + if not self.cl.is_redundant() or self.cl.is_primary(): if restart_dnsmasq: CsHelper.service("dnsmasq", "restart") else: diff --git a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py index 00aa4cb64089..c892b5df9102 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsHelper.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsHelper.py @@ -29,8 +29,8 @@ PUBLIC_INTERFACES = {"router": "eth2", "vpcrouter": "eth1"} -STATE_COMMANDS = {"router": "ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'", - "vpcrouter": "ip addr show dev eth1 | grep state | awk '{print $9;}' | xargs bash -c 'if [ $0 == \"UP\" ]; then echo \"MASTER\"; else echo \"BACKUP\"; fi'"} +STATE_COMMANDS = {"router": "ip addr show dev eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo \"PRIMARY\"; else echo \"BACKUP\"; fi'", + "vpcrouter": "ip addr show dev eth1 | grep state | awk '{print $9;}' | xargs bash -c 'if [ $0 == \"UP\" ]; then echo \"PRIMARY\"; else echo \"BACKUP\"; fi'"} def reconfigure_interfaces(router_config, interfaces): @@ -41,14 +41,14 @@ def reconfigure_interfaces(router_config, interfaces): cmd = "ip link set %s up" % interface.get_device() # If redundant only bring up public interfaces that are not eth1. # Reason: private gateways are public interfaces. - # master.py and keepalived will deal with eth1 public interface. 
+ # configure_router.py and keepalived will deal with eth1 public interface. if router_config.is_redundant() and interface.is_public(): state_cmd = STATE_COMMANDS[router_config.get_type()] logging.info("Check state command => %s" % state_cmd) state = execute(state_cmd)[0] logging.info("Route state => %s" % state) - if interface.get_device() != PUBLIC_INTERFACES[router_config.get_type()] and state == "MASTER": + if interface.get_device() != PUBLIC_INTERFACES[router_config.get_type()] and state == "PRIMARY": execute(cmd) else: execute(cmd) diff --git a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py index 190de1ab82e2..23622fdbf5d0 100755 --- a/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py +++ b/systemvm/debian/opt/cloud/bin/cs/CsRedundant.py @@ -199,20 +199,20 @@ def _redundant_on(self): if keepalived_conf.is_changed() or force_keepalived_restart: keepalived_conf.commit() os.chmod(self.KEEPALIVED_CONF, 0o644) - if force_keepalived_restart or not self.cl.is_master(): + if force_keepalived_restart or not self.cl.is_primary(): CsHelper.service("keepalived", "restart") else: CsHelper.service("keepalived", "reload") def release_lock(self): try: - os.remove("/tmp/master_lock") + os.remove("/tmp/primary_lock") except OSError: pass def set_lock(self): """ - Make sure that master state changes happen sequentially + Make sure that primary state changes happen sequentially """ iterations = 10 time_between = 1 @@ -220,13 +220,13 @@ def set_lock(self): for iter in range(0, iterations): try: s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - s.bind('/tmp/master_lock') + s.bind('/tmp/primary_lock') return s except socket.error, e: error_code = e.args[0] error_string = e.args[1] print "Process already running (%d:%s). 
Exiting" % (error_code, error_string) - logging.info("Master is already running, waiting") + logging.info("Primary is already running, waiting") sleep(time_between) def set_fault(self): @@ -290,7 +290,7 @@ def set_backup(self): CsHelper.service("dnsmasq", "stop") - self.cl.set_master_state(False) + self.cl.set_primary_state(False) self.cl.save() self.release_lock() @@ -298,14 +298,14 @@ def set_backup(self): CsHelper.reconfigure_interfaces(self.cl, interfaces) logging.info("Router switched to backup mode") - def set_master(self): - """ Set the current router to master """ + def set_primary(self): + """ Set the current router to primary """ if not self.cl.is_redundant(): - logging.error("Set master called on non-redundant router") + logging.error("Set primary called on non-redundant router") return self.set_lock() - logging.debug("Setting router to master") + logging.debug("Setting router to primary") dev = '' interfaces = [interface for interface in self.address.get_interfaces() if interface.is_public()] @@ -348,7 +348,7 @@ def set_master(self): CsPasswdSvc(interface.get_gateway() + "," + interface.get_ip()).restart() CsHelper.service("dnsmasq", "restart") - self.cl.set_master_state(True) + self.cl.set_primary_state(True) self.cl.save() self.release_lock() @@ -362,7 +362,7 @@ def set_master(self): public_devices.sort() # Ensure the default route is added, or outgoing traffic from VMs with static NAT on - # the subsequent interfaces will go from he wrong IP + # the subsequent interfaces will go from the wrong IP route = CsRoute() dev = '' for interface in interfaces: @@ -381,7 +381,7 @@ def set_master(self): if interface.get_device() == device: CsHelper.execute("arping -I %s -U %s -c 1" % (device, interface.get_ip())) - logging.info("Router switched to master mode") + logging.info("Router switched to primary mode") def _collect_ignore_ips(self): """ diff --git a/systemvm/debian/opt/cloud/bin/ipassoc.sh b/systemvm/debian/opt/cloud/bin/ipassoc.sh index 
9bcb13279d76..e653e72aa62c 100755 --- a/systemvm/debian/opt/cloud/bin/ipassoc.sh +++ b/systemvm/debian/opt/cloud/bin/ipassoc.sh @@ -358,7 +358,7 @@ cflag= nflag= op="" -is_master=0 +is_primary=0 is_redundant=0 if_keep_state=0 IFACEGWIPFILE='/var/cache/cloud/ifaceGwIp' @@ -366,13 +366,13 @@ grep "redundant_router=1" /var/cache/cloud/cmdline > /dev/null if [ $? -eq 0 ] then is_redundant=1 - sudo /opt/cloud/bin/checkrouter.sh --no-lock|grep "Status: MASTER" > /dev/null 2>&1 + sudo /opt/cloud/bin/checkrouter.sh --no-lock|grep "Status: PRIMARY" > /dev/null 2>&1 if [ $? -eq 0 ] then - is_master=1 + is_primary=1 fi fi -if [ $is_redundant -eq 1 -a $is_master -ne 1 ] +if [ $is_redundant -eq 1 -a $is_primary -ne 1 ] then if_keep_state=1 fi diff --git a/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ b/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ index 62a2b180e6c4..47db33e716e8 100755 --- a/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ +++ b/systemvm/debian/opt/cloud/templates/check_heartbeat.sh.templ @@ -58,7 +58,7 @@ then systemctl stop --now conntrackd >> $ROUTER_LOG 2>&1 #Set fault so we have the same effect as a KeepaliveD fault. 
- python /opt/cloud/bin/master.py --fault + python /opt/cloud/bin/configure_router.py --fault pkill -9 keepalived >> $ROUTER_LOG 2>&1 || true pkill -9 conntrackd >> $ROUTER_LOG 2>&1 || true diff --git a/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ b/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ index fcfc58d5b95f..2aff777b9a53 100755 --- a/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ +++ b/systemvm/debian/opt/cloud/templates/checkrouter.sh.templ @@ -21,13 +21,13 @@ INTERFACE=eth1 ROUTER_TYPE=$(cat /etc/cloudstack/cmdline.json | grep type | awk '{print $2;}' | sed -e 's/[,\"]//g') if [ $ROUTER_TYPE = "router" ] then - ROUTER_STATE=$(ip addr | grep eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "MASTER"; else echo "BACKUP"; fi') + ROUTER_STATE=$(ip addr | grep eth0 | grep inet | wc -l | xargs bash -c 'if [ $0 == 2 ]; then echo "PRIMARY"; else echo "BACKUP"; fi') STATUS=$ROUTER_STATE else ROUTER_STATE=$(ip addr | grep $INTERFACE | grep state | awk '{print $9;}') if [ $ROUTER_STATE = "UP" ] then - STATUS=MASTER + STATUS=PRIMARY elif [ $ROUTER_STATE = "DOWN" ] then STATUS=BACKUP diff --git a/systemvm/debian/opt/cloud/templates/keepalived.conf.templ b/systemvm/debian/opt/cloud/templates/keepalived.conf.templ index ca9f231a541e..a6bd66202413 100644 --- a/systemvm/debian/opt/cloud/templates/keepalived.conf.templ +++ b/systemvm/debian/opt/cloud/templates/keepalived.conf.templ @@ -48,7 +48,7 @@ vrrp_instance inside_network { heartbeat } - notify_backup "/opt/cloud/bin/master.py --backup" - notify_master "/opt/cloud/bin/master.py --master" - notify_fault "/opt/cloud/bin/master.py --fault" + notify_backup "/opt/cloud/bin/configure_router.py --backup" + notify_master "/opt/cloud/bin/configure_router.py --primary" + notify_fault "/opt/cloud/bin/configure_router.py --fault" } diff --git a/test/integration/component/maint/test_redundant_router.py b/test/integration/component/maint/test_redundant_router.py index 
94ddae255f0e..fe27888bbd18 100644 --- a/test/integration/component/maint/test_redundant_router.py +++ b/test/integration/component/maint/test_redundant_router.py @@ -211,7 +211,7 @@ def test_createRvRNetwork(self): # - same public IP # - same MAC address of public NIC # - different guestip address - # - redundant state (MASTER or BACKUP) + # - redundant state (PRIMARY or BACKUP) # - same gateway for the public traffic # 6. all routers, networks and user VMs are cleaned up @@ -284,34 +284,34 @@ def test_createRvRNetwork(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Redundant states: %s, %s" % ( - master_router.redundantstate, + primary_router.redundantstate, backup_router.redundantstate )) self.assertEqual( - master_router.publicip, + primary_router.publicip, backup_router.publicip, - "Public Ip should be same for both(MASTER & BACKUP)" + "Public Ip should be same for both(PRIMARY & BACKUP)" ) self.assertEqual( - master_router.redundantstate, - "MASTER", - "Redundant state of router should be MASTER" + primary_router.redundantstate, + "PRIMARY", + "Redundant state of router should be PRIMARY" ) self.assertEqual( backup_router.redundantstate, @@ -319,15 +319,15 @@ def test_createRvRNetwork(self): "Redundant state of router should be BACKUP" ) self.assertNotEqual( - master_router.guestipaddress, + primary_router.guestipaddress, backup_router.guestipaddress, - "Both (MASTER & BACKUP) routers should not have same 
guest IP" + "Both (PRIMARY & BACKUP) routers should not have same guest IP" ) self.assertNotEqual( - master_router.guestmacaddress, + primary_router.guestmacaddress, backup_router.guestmacaddress, - "Both (MASTER & BACKUP) routers should not have same guestMAC" + "Both (PRIMARY & BACKUP) routers should not have same guestMAC" ) return @@ -413,7 +413,7 @@ def test_createRvRNetwork(self): # - same public IP # - same MAC address of public NIC # - different guestip address - # - redundant state (MASTER or BACKUP) + # - redundant state (PRIMARY or BACKUP) # - same gateway for the public traffic # 6. all routers, networks and user VMs are cleaned up @@ -498,30 +498,30 @@ def test_createRvRNetwork(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.assertEqual( - master_router.publicip, + primary_router.publicip, backup_router.publicip, - "Public Ip should be same for both(MASTER & BACKUP)" + "Public Ip should be same for both(PRIMARY & BACKUP)" ) self.assertEqual( - master_router.redundantstate, - "MASTER", - "Redundant state of router should be MASTER" + primary_router.redundantstate, + "PRIMARY", + "Redundant state of router should be PRIMARY" ) self.assertEqual( backup_router.redundantstate, @@ -529,15 +529,15 @@ def test_createRvRNetwork(self): "Redundant state of router should be BACKUP" ) self.assertNotEqual( - master_router.guestipaddress, + primary_router.guestipaddress, backup_router.guestipaddress, - "Both (MASTER & 
BACKUP) routers should not have same guest IP" + "Both (PRIMARY & BACKUP) routers should not have same guest IP" ) self.assertNotEqual( - master_router.guestmacaddress, + primary_router.guestmacaddress, backup_router.guestmacaddress, - "Both (MASTER & BACKUP) routers should not have same guestMAC" + "Both (PRIMARY & BACKUP) routers should not have same guestMAC" ) return @@ -622,13 +622,13 @@ def test_redundantVR_internals(self): # Validate the following: # 1. listNetworks lists network in Allocated state # 2. listRouters lists no routers created yet - # 3. listRouters returns Master and Backup routers + # 3. listRouters returns Primary and Backup routers # 4. ssh in to both routers and verify: - # - MASTER router has eth2 with public Ip address + # - PRIMARY router has eth2 with public Ip address # - BACKUP router has only guest eth0 and link local eth1 - # - Broadcast on MASTER eth2 is non-zero (0.0.0.0) + # - Broadcast on PRIMARY eth2 is non-zero (0.0.0.0) # - execute checkrouter.sh in router home and check if it is status - # "MASTER|BACKUP" as returned by the listRouters API + # "PRIMARY|BACKUP" as returned by the listRouters API # 5. 
DNS of the user VM is set to RedundantRouter Gateway # (/etc/resolv.conf) # Check that the default gateway for the guest is the rvr gateway @@ -703,35 +703,35 @@ def test_redundantVR_internals(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Fetching the host details for double hop into router") hosts = Host.list( self.apiclient, - id=master_router.hostid + id=primary_router.hostid ) self.assertEqual( isinstance(hosts, list), True, "List hosts should return a valid list" ) - master_host = hosts[0] - self.debug("Host for master router: %s" % master_host.name) - self.debug("Host for master router: %s" % master_host.ipaddress) + primary_host = hosts[0] + self.debug("Host for primary router: %s" % primary_host.name) + self.debug("Host for primary router: %s" % primary_host.ipaddress) hosts = Host.list( self.apiclient, @@ -745,37 +745,37 @@ def test_redundantVR_internals(self): backup_host = hosts[0] self.debug("Host for backup router: %s" % backup_host.name) self.debug("Host for backup router: %s" % backup_host.ipaddress) - self.debug(master_router.linklocalip) + self.debug(primary_router.linklocalip) - # Check eth2 port for master router + # Check eth2 port for primary router if self.hypervisor.lower() in ('vmware', 'hyperv'): result = get_process_status( self.apiclient.connection.mgtSvr, 22, self.apiclient.connection.user, self.apiclient.connection.passwd, - master_router.linklocalip, + primary_router.linklocalip, 
'ip addr show eth2', hypervisor=self.hypervisor ) else: result = get_process_status( - master_host.ipaddress, + primary_host.ipaddress, 22, self.testdata['configurableData']['host']["username"], self.testdata['configurableData']['host']["password"], - master_router.linklocalip, + primary_router.linklocalip, "ip addr show eth2" ) res = str(result) self.debug("Command 'ip addr show eth2': %s" % result) - self.debug("Router's public Ip: %s" % master_router.publicip) + self.debug("Router's public Ip: %s" % primary_router.publicip) self.assertEqual( res.count("state UP"), 1, - "MASTER router's public interface should be UP" + "PRIMARY router's public interface should be UP" ) self.assertEqual( result.count('brd 0.0.0.0'), @@ -831,8 +831,8 @@ def test_redundantVR_internals(self): self.assertNotEqual( vm.nic[0].gateway, - master_router.publicip, - "The gateway of user VM should be same as master router" + primary_router.publicip, + "The gateway of user VM should be same as primary router" ) self.assertNotEqual( @@ -943,8 +943,8 @@ def tearDown(self): return @attr(tags=["advanced", "advancedns", "ssh"]) - def test_01_stopMasterRvR(self): - """Test stop master RVR + def test_01_stopPrimaryRvR(self): + """Test stop primary RVR """ # Steps to validate @@ -954,17 +954,17 @@ def test_01_stopMasterRvR(self): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network - # 5. stopRouter that is Master. Router goes to stopped state + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network + # 5. stopRouter that is Primary. Router goes to stopped state # successfully - # 6. listRouters in the account and in the network. Lists old MASTER + # 6. listRouters in the account and in the network. 
Lists old PRIMARY # router in redundantstate=UNKNOWN, and the old BACKUP router as - # new MASTER + # new PRIMARY # 7. start the stopped router. Stopped rvr starts up successfully and # is in Running state # 8. listRouters in the account and in the network. Router shows up as - # BACKUP and NOT MASTER, should have only one BACKUP and one MASTER + # BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY # at the end, public IP of the SourceNAT should remain same after # reboot # 9. delete the account @@ -978,26 +978,26 @@ def test_01_stopMasterRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Stopping the MASTER router") + self.debug("Stopping the PRIMARY router") try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR to update state time.sleep(self.testdata["sleep"]) @@ -1005,17 +1005,17 @@ def test_01_stopMasterRvR(self): self.debug("Listing routers for network: %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertIn( routers[0].redundantstate, [ - 'UNKNOWN', 
'FAULT'], "Redundant state of the master router\ + 'UNKNOWN', 'FAULT'], "Redundant state of the primary router\ should be UNKNOWN/FAULT but is %s" % routers[0].redundantstate) @@ -1034,26 +1034,26 @@ def test_01_stopMasterRvR(self): ) self.assertEqual( routers[0].redundantstate, - 'MASTER', - "Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) - self.debug("Starting the old MASTER router") + self.debug("Starting the old PRIMARY router") try: - Router.start(self.apiclient, id=master_router.id) - self.debug("old MASTER router started") + Router.start(self.apiclient, id=primary_router.id) + self.debug("old PRIMARY router started") except Exception as e: - self.fail("Failed to start master router: %s" % e) + self.fail("Failed to start primary router: %s" % e) # wait for VR to update state time.sleep(self.testdata["sleep"]) self.debug( - "Checking state of the master router in %s" % + "Checking state of the primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( @@ -1067,7 +1067,7 @@ def test_01_stopMasterRvR(self): "Redundant state of the router should be BACKUP but is %s" % routers[0].redundantstate) self.assertEqual( - master_router.publicip, + primary_router.publicip, routers[0].publicip, "Public IP should be same after reboot" ) @@ -1085,16 +1085,16 @@ def test_02_stopBackupRvR(self): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network # 5. stopRouter that is BACKUP. Router goes to stopped state # successfully - # 6. 
listRouters in the account and in the network. Lists old MASTER + # 6. listRouters in the account and in the network. Lists old PRIMARY # router in redundantstate=UNKNOWN # 7. start the stopped router. Stopped rvr starts up successfully and # is in Running state # 8. listRouters in the account and in the network. Router shows up as - # BACKUP and NOT MASTER, should have only one BACKUP and one MASTER + # BACKUP and NOT PRIMARY, should have only one BACKUP and one PRIMARY # at the end, public IP of the SourceNAT should remain same after # reboot # 9. delete the account @@ -1108,19 +1108,19 @@ def test_02_stopBackupRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Stopping the BACKUP router") @@ -1143,7 +1143,7 @@ def test_02_stopBackupRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertIn( routers[0].redundantstate, [ @@ -1152,22 +1152,22 @@ def test_02_stopBackupRvR(self): routers[0].redundantstate) self.debug( - "Checking state of the master router in %s" % + "Checking state of the primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup 
routers" ) self.assertEqual( routers[0].redundantstate, - 'MASTER', - "Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) self.debug("Starting the old BACKUP router") @@ -1175,7 +1175,7 @@ def test_02_stopBackupRvR(self): Router.start(self.apiclient, id=backup_router.id) self.debug("old BACKUP router started") except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR to start and update state time.sleep(self.testdata["sleep"]) @@ -1206,8 +1206,8 @@ def test_02_stopBackupRvR(self): return @attr(tags=["advanced", "advancedns", "ssh"]) - def test_03_rebootMasterRvR(self): - """Test reboot master RVR + def test_03_rebootPrimaryRvR(self): + """Test reboot primary RVR """ # Steps to validate @@ -1217,12 +1217,12 @@ def test_03_rebootMasterRvR(self): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network - # 5. reboot router that is MASTER. Router reboots state + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network + # 5. reboot router that is PRIMARY. Router reboots state # successfully - # 6. lists old MASTER router in redundantstate=BACKUP and the old - # BACKUP router as new MASTER + public IP of the SourceNAT should + # 6. 
lists old PRIMARY router in redundantstate=BACKUP and the old + # BACKUP router as new PRIMARY + public IP of the SourceNAT should # remain same after the reboot self.debug("Listing routers for network: %s" % self.network.name) @@ -1234,42 +1234,42 @@ def test_03_rebootMasterRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Rebooting the master router") + self.debug("Rebooting the primary router") try: - Router.reboot(self.apiclient, id=master_router.id) + Router.reboot(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to reboot MASTER router: %s" % e) + self.fail("Failed to reboot PRIMARY router: %s" % e) # wait for VR to update state time.sleep(self.testdata["sleep"]) self.debug( - "Checking state of the master router in %s" % + "Checking state of the primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, @@ -1288,15 +1288,15 @@ def test_03_rebootMasterRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, - 'MASTER', - 
"Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) self.assertEqual( - master_router.publicip, + primary_router.publicip, routers[0].publicip, "Public IP should be same after reboot" ) @@ -1314,12 +1314,12 @@ def test_04_rebootBackupRvR(self): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network # 5. reboot router that is BACKUP. Router reboots state # successfully # 6. lists old BACKUP router in redundantstate=BACKUP, and the old - # MASTER router is still MASTER+ public IP of the SourceNAT should + # PRIMARY router is still PRIMARY+ public IP of the SourceNAT should # remain same after the reboot self.debug("Listing routers for network: %s" % self.network.name) @@ -1331,19 +1331,19 @@ def test_04_rebootBackupRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Rebooting the backup router") @@ -1366,7 +1366,7 @@ def test_04_rebootBackupRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup 
routers" ) self.assertEqual( routers[0].redundantstate, @@ -1375,25 +1375,25 @@ def test_04_rebootBackupRvR(self): routers[0].redundantstate) self.debug( - "Checking state of the master router in %s" % + "Checking state of the Primary router in %s" % self.network.name) routers = Router.list( self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True ) self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, - 'MASTER', - "Redundant state of the router should be MASTER but is %s" % + 'PRIMARY', + "Redundant state of the router should be PRIMARY but is %s" % routers[0].redundantstate) self.assertEqual( - master_router.publicip, + primary_router.publicip, routers[0].publicip, "Public IP should be same after reboot" ) @@ -1411,8 +1411,8 @@ def test_05_stopBackupRvR_startInstance(self): # network # 3. deployVM in above user account in the created network. VM is # successfully Running - # 4. listRouters that has redundantstate=MASTER. only one router is - # returned with redundantstate = MASTER for this network + # 4. listRouters that has redundantstate=PRIMARY. only one router is + # returned with redundantstate = PRIMARY for this network # 5. stop router that is BACKUP. # 6. listRouters in the account and in the network # 7. 
deployVM in the user account in the created network @@ -1428,15 +1428,15 @@ def test_05_stopBackupRvR_startInstance(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': + if routers[0].redundantstate == 'PRIMARY': backup_router = routers[1] else: backup_router = routers[0] @@ -1461,7 +1461,7 @@ def test_05_stopBackupRvR_startInstance(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertIn( routers[0].redundantstate, @@ -1512,7 +1512,7 @@ def test_05_stopBackupRvR_startInstance(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( routers[0].redundantstate, @@ -1537,9 +1537,9 @@ def updateNetwork(self, conn): - def get_master_and_backupRouter(self): + def get_primary_and_backupRouter(self): retry = 4 - master_router = backup_router=None + primary_router = backup_router=None while retry > 0: routers = Router.list( self.apiclient, @@ -1549,22 +1549,22 @@ def get_master_and_backupRouter(self): retry = retry-1 if len(routers) < 2: continue - if not (routers[0].redundantstate == 'MASTER' or routers[1].redundantstate == 'MASTER'): + if not (routers[0].redundantstate == 'PRIMARY' or routers[1].redundantstate == 'PRIMARY'): continue; - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] break else: - master_router = routers[1] + primary_router = routers[1] backup_router = 
routers[0] break - self.info("master_router: %s, backup_router: %s" % (master_router, backup_router)) - return master_router, backup_router + self.info("primary_router: %s, backup_router: %s" % (primary_router, backup_router)) + return primary_router, backup_router def chek_for_new_backupRouter(self,old_backup_router): - master_router, backup_router = self.get_master_and_backupRouter() + primary_router, backup_router = self.get_primary_and_backupRouter() retry = 4 self.info("Checking if new router is getting created.") self.info("old_backup_router:"+old_backup_router.name+" new_backup_router:"+backup_router.name) @@ -1574,7 +1574,7 @@ def chek_for_new_backupRouter(self,old_backup_router): if retry == 0: break; time.sleep(self.testdata["sleep"]) - master_router, backup_router = self.get_master_and_backupRouter() + primary_router, backup_router = self.get_primary_and_backupRouter() if retry == 0: self.fail("New router creation taking too long, timed out") @@ -1602,18 +1602,18 @@ def test_06_updateVRs_in_sequence(self): # Steps to validate # update network to a new offering - # check if the master router is running while backup is starting. - # check if the backup is running while master is starting. + # check if the primary router is running while backup is starting. + # check if the backup is running while primary is starting. # check if both the routers are running after the update is complete. #clean up the network to make sure it is in proper state. 
self.network.restart(self.apiclient,cleanup=True) time.sleep(self.testdata["sleep"]) self.wait_untill_router_stabilises() - old_master_router, old_backup_router = self.get_master_and_backupRouter() - self.info("old_master_router:"+old_master_router.name+" old_backup_router"+old_backup_router.name) + old_primary_router, old_backup_router = self.get_primary_and_backupRouter() + self.info("old_primary_router:"+old_primary_router.name+" old_backup_router"+old_backup_router.name) #chek if the network is in correct state - self.assertEqual(old_master_router.state, "Running", "The master router is not running, network is not in a correct state to start the test") + self.assertEqual(old_primary_router.state, "Running", "The primary router is not running, network is not in a correct state to start the test") self.assertEqual(old_backup_router.state, "Running", "The backup router is not running, network is not in a correct state to start the test") worker, monitor = multiprocessing.Pipe() @@ -1627,30 +1627,30 @@ def test_06_updateVRs_in_sequence(self): self.info("Network update Started, the old backup router will get destroyed and a new router will be created") self.chek_for_new_backupRouter(old_backup_router) - master_router, new_backup_router=self.get_master_and_backupRouter() - #the state of the master router should be running. while backup is being updated - self.assertEqual(master_router.state, "Running", "State of the master router is not running") - self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s"%master_router.redundantstate) + primary_router, new_backup_router=self.get_primary_and_backupRouter() + #the state of the primary router should be running. 
while backup is being updated + self.assertEqual(primary_router.state, "Running", "State of the primary router is not running") + self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate) self.info("Old backup router:"+old_backup_router.name+" is destroyed and new router:"+new_backup_router.name+" got created") - #wait for the new backup to become master. + #wait for the new backup to become primary. retry = 4 - while new_backup_router.name != master_router.name: + while new_backup_router.name != primary_router.name: retry = retry-1 if retry == 0: break time.sleep(self.testdata["sleep"]) - self.info("wating for backup router to become master router name:"+new_backup_router.name) - master_router, backup_router = self.get_master_and_backupRouter() + self.info("wating for backup router to become primary router name:"+new_backup_router.name) + primary_router, backup_router = self.get_primary_and_backupRouter() if retry == 0: - self.fail("timed out while waiting for new backup router to change state to MASTER.") + self.fail("timed out while waiting for new backup router to change state to PRIMARY.") - #new backup router has become master. - self.info("newly created router:"+new_backup_router.name+" has changed state to Master") - self.info("old master router:"+old_master_router.name+"is destroyed") - #old master will get destroyed and a new backup will be created. + #new backup router has become primary. + self.info("newly created router:"+new_backup_router.name+" has changed state to Primary") + self.info("old primary router:"+old_primary_router.name+"is destroyed") + #old primary will get destroyed and a new backup will be created. 
#wait until new backup changes state from unknown to backup - master_router, backup_router = self.get_master_and_backupRouter() + primary_router, backup_router = self.get_primary_and_backupRouter() retry = 4 while backup_router.redundantstate != 'BACKUP': retry = retry-1 @@ -1658,14 +1658,14 @@ def test_06_updateVRs_in_sequence(self): if retry == 0: break time.sleep(self.testdata["sleep"]) - master_router, backup_router = self.get_master_and_backupRouter() - self.assertEqual(master_router.state, "Running", "State of the master router is not running") - self.assertEqual(master_router.redundantstate, 'MASTER', "Redundant state of the master router should be MASTER, but it is %s"%master_router.redundantstate) + primary_router, backup_router = self.get_primary_and_backupRouter() + self.assertEqual(primary_router.state, "Running", "State of the primary router is not running") + self.assertEqual(primary_router.redundantstate, 'PRIMARY', "Redundant state of the primary router should be PRIMARY, but it is %s"%primary_router.redundantstate) if retry == 0: - self.fail("timed out while waiting for new backup rotuer to change state to MASTER.") + self.fail("timed out while waiting for new backup router to change state to PRIMARY.") #the network update is complete.finally both the router should be running.
- new_master_router, new_backup_router=self.get_master_and_backupRouter() - self.assertEqual(new_master_router.state, "Running", "State of the master router:"+new_master_router.name+" is not running") + new_primary_router, new_backup_router=self.get_primary_and_backupRouter() + self.assertEqual(new_primary_router.state, "Running", "State of the primary router:"+new_primary_router.name+" is not running") self.assertEqual(new_backup_router.state, "Running", "State of the backup router:"+new_backup_router.name+" is not running") worker_process.join() diff --git a/test/integration/component/maint/test_redundant_router_deployment_planning.py b/test/integration/component/maint/test_redundant_router_deployment_planning.py index b63cda94e58b..eb68c435250b 100644 --- a/test/integration/component/maint/test_redundant_router_deployment_planning.py +++ b/test/integration/component/maint/test_redundant_router_deployment_planning.py @@ -215,7 +215,7 @@ def test_RvR_multipods(self): # 1. listNetworkOfferings should show created offering for RvR # 2. listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different pods self.debug("Checking if the current zone has 2 active pods in it..") @@ -317,12 +317,12 @@ def test_RvR_multipods(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.assertNotEqual( routers[0].podid, @@ -350,7 +350,7 @@ def test_RvR_multicluster(self): # 1. listNetworkOfferings should show created offering for RvR # 2. 
listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different pods self.debug("Checking if the current zone has 2 active pods in it..") @@ -481,12 +481,12 @@ def test_RvR_multicluster(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) hosts = Host.list( @@ -555,7 +555,7 @@ def test_RvR_multiprimarystorage(self): # 2. listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state and on the specified # host - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different storage pools self.debug( @@ -732,12 +732,12 @@ def test_RvR_multiprimarystorage(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.assertNotEqual( routers[0].hostid, @@ -792,7 +792,7 @@ def test_RvR_multihosts(self): # 1. listNetworkOfferings should show created offering for RvR # 2. listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state and on specified host - # 4. There should be two routers (MASTER and BACKUP) for this network + # 4. 
There should be two routers (PRIMARY and BACKUP) for this network # ensure both routers should be on different hosts self.debug( @@ -969,12 +969,12 @@ def test_RvR_multihosts(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.assertNotEqual( routers[0].hostid, diff --git a/test/integration/component/maint/test_redundant_router_network_rules.py b/test/integration/component/maint/test_redundant_router_network_rules.py index da873823eae4..7348d1629530 100644 --- a/test/integration/component/maint/test_redundant_router_network_rules.py +++ b/test/integration/component/maint/test_redundant_router_network_rules.py @@ -198,14 +198,14 @@ def tearDown(self): @attr(tags=["advanced", "advancedns", "ssh"], required_hardware="true") def test_networkRules_afterRebootRouters(self): - """Test network rules after master & backup routers rebooted + """Test network rules after primary & backup routers rebooted """ # Steps to validate # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP addr # 6. listStaticNats for the network associated # 7. listFirewallRules should show allowed ports open @@ -217,9 +217,9 @@ def test_networkRules_afterRebootRouters(self): # 13 and 14. listLoadBalancerRules should show associated VMs for # public IP # 15. ssh should succeed to the user VMs - # 16. listRouters should show one Router in MASTER state and Running + # 16. listRouters should show one Router in PRIMARY state and Running # 17. 
ssh should work for PF, FW, and LB ips - # 18. listRouters should show both routers MASTER and BACKUP in + # 18. listRouters should show both routers PRIMARY and BACKUP in # Running state # 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRule # should return empty response @@ -308,19 +308,19 @@ def test_networkRules_afterRebootRouters(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Associating public IP for network: %s" % network.name) @@ -435,11 +435,11 @@ def test_networkRules_afterRebootRouters(self): )) lb_rule.assign(self.apiclient, [virtual_machine]) - self.debug("Starting router ID: %s" % master_router.id) + self.debug("Starting router ID: %s" % primary_router.id) for router in routers: try: - self.debug("Rebooting router ID: %s" % master_router.id) + self.debug("Rebooting router ID: %s" % primary_router.id) #Stop the router cmd = rebootRouter.rebootRouterCmd() cmd.id = router.id @@ -456,12 +456,12 @@ def test_networkRules_afterRebootRouters(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) for router in routers: self.assertEqual( @@ -510,7 +510,7 @@ def test_applyRules_restartRvRNetwork(self): # 1.
listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP addr # 6. listStaticNats for the network associated # 7. listFirewallRules should show allowed ports open @@ -522,10 +522,10 @@ def test_applyRules_restartRvRNetwork(self): # 13 and 14. listLoadBalancerRules should show associated VMs for # public IP # 15. ssh should succeed to the user VMs - # 16. listRouters should show one Router in MASTER state and Running & + # 16. listRouters should show one Router in PRIMARY state and Running & # one in BACKUP and Running # 17. ssh should work for PF, FW, and LB ips - # 18. listRouters should show one Router in MASTER state and Running & + # 18. listRouters should show one Router in PRIMARY state and Running & # one in BACKUP and Running # 19. ssh should work for PF, FW, and LB ips # 20. 
listPortForwardingRules, listFirewallRules, listLoadBalancerRule @@ -615,19 +615,19 @@ def test_applyRules_restartRvRNetwork(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] self.debug("Associating public IP for network: %s" % network.name) @@ -759,12 +759,12 @@ def test_applyRules_restartRvRNetwork(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -819,12 +819,12 @@ def test_applyRules_restartRvRNetwork(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -872,7 +872,7 @@ def test_apply_and__delete_NetworkRulesOnRvR(self): # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. 
listPublicIpAddresses for networkid should show acquired IP # 6. listRemoteAccessVpns for the network associated should show the # VPN created @@ -962,12 +962,12 @@ def test_apply_and__delete_NetworkRulesOnRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("Associating public IP for network: %s" % network.name) @@ -1114,15 +1114,15 @@ def test_apply_and__delete_NetworkRulesOnRvR(self): return @attr(tags=["advanced", "advancedns", "ssh", "needle"], required_hardware="true") - def test_applyNetworkRules_MasterDown_deleteNetworkRules(self): - """Test apply network rules when master down and delete network rules + def test_applyNetworkRules_PrimaryDown_deleteNetworkRules(self): + """Test apply network rules when primary down and delete network rules """ # Steps to validate # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. listPublicIpAddresses for networkid should show acquired IP addr # 6. listStaticNats for the network associated # 7. listFirewallRules should show allowed ports open @@ -1134,9 +1134,9 @@ def test_applyNetworkRules_MasterDown_deleteNetworkRules(self): # 13 and 14. listLoadBalancerRules should show associated VMs for # public IP # 15. ssh should succeed to the user VMs - # 16. listRouters should show one Router in MASTER state and Running + # 16. listRouters should show one Router in PRIMARY state and Running # 17. ssh should work for PF, FW, and LB ips - # 18. listRouters should show both routers MASTER and BACKUP in + # 18. 
listRouters should show both routers PRIMARY and BACKUP in # Running state # 19. listPortForwardingRules, listFirewallRules, listLoadBalancerRule # should return empty response @@ -1229,27 +1229,27 @@ def test_applyNetworkRules_MasterDown_deleteNetworkRules(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Stopping router ID: %s" % master_router.id) + self.debug("Stopping router ID: %s" % primary_router.id) try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router becaues of %s" % e) + self.fail("Failed to stop primary router becaues of %s" % e) self.debug("Associating public IP for network: %s" % network.name) public_ip = PublicIPAddress.create( @@ -1394,12 +1394,12 @@ def test_applyNetworkRules_MasterDown_deleteNetworkRules(self): except Exception as e: self.fail("SSH to guest VM failed: %s" % e) - self.debug("Starting router ID: %s" % master_router.id) + self.debug("Starting router ID: %s" % primary_router.id) try: - Router.start(self.apiclient, id=master_router.id) + Router.start(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to start master router..") + self.fail("Failed to start primary router..") self.debug("Listing routers for network: %s" % network.name) routers = Router.list( @@ -1410,12 +1410,12 @@ def 
test_applyNetworkRules_MasterDown_deleteNetworkRules(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) for router in routers: self.assertEqual( diff --git a/test/integration/component/test_acl_isolatednetwork.py b/test/integration/component/test_acl_isolatednetwork.py index a567c26ef69f..a1deb93b2623 100644 --- a/test/integration/component/test_acl_isolatednetwork.py +++ b/test/integration/component/test_acl_isolatednetwork.py @@ -364,7 +364,7 @@ def tearDown(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_01_createNetwork_admin(self): """ - # Validate that Admin should be able to create network for himslef + # Validate that Admin should be able to create network with self-ownership """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -380,12 +380,12 @@ def test_01_createNetwork_admin(self): self.cleanup.append(network) self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Admin User is not able to create a network for himself") + "Admin User is not able to create a network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_02_createNetwork_admin_foruserinsamedomain(self): """ - # Validate that Admin should be able to create network for users in his domain + # Validate that Admin should be able to create network for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -403,13 +403,13 @@ def test_02_createNetwork_admin_foruserinsamedomain(self): self.cleanup.append(network) self.assertEqual(network.state.lower() == 
ALLOCATED.lower(), True, - "Admin User is not able to create a network for other users in his domain") + "Admin User is not able to create a network for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_03_createNetwork_admin_foruserinotherdomain(self): """ - # Validate that Admin should be able to create network for users in his sub domain + # Validate that Admin should be able to create network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -435,7 +435,7 @@ def test_03_createNetwork_admin_foruserinotherdomain(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_04_createNetwork_domaindmin(self): """ - # Validate that Domain admin should be able to create network for himslef + # Validate that Domain admin should be able to create network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -451,13 +451,13 @@ def test_04_createNetwork_domaindmin(self): self.cleanup.append(network) self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Domain admin User is not able to create a network for himself") + "Domain admin User is not able to create a network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_05_createNetwork_domaindmin_foruserinsamedomain(self): """ - # Validate that Domain admin should be able to create network for users in his domain + # Validate that Domain admin should be able to create network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -475,13 +475,13 @@ def test_05_createNetwork_domaindmin_foruserinsamedomain(self): self.cleanup.append(network) self.assertEqual(network.state.lower() == 
ALLOCATED.lower(), True, - "Domain admin User is not able to create a network for other users in his domain") + "Domain admin User is not able to create a network for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_06_createNetwork_domaindmin_foruserinsubdomain(self): """ - # Validate that Domain admin should be able to create network for users in his sub domain + # Validate that Domain admin should be able to create network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -499,13 +499,13 @@ def test_06_createNetwork_domaindmin_foruserinsubdomain(self): self.cleanup.append(network) self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "Domain admin User is not able to create a network for other users in his sub domain") + "Domain admin User is not able to create a network for other users in their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_07_createNetwork_domaindmin_forcrossdomainuser(self): """ - # Validate that Domain admin should not be able to create network for users in his sub domain + # Validate that Domain admin should not be able to create network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -521,18 +521,18 @@ def test_07_createNetwork_domaindmin_forcrossdomainuser(self): domainid=self.account_d2a.domainid ) self.cleanup.append(network) - self.fail("Domain admin is allowed to create network for users not in his domain ") + self.fail("Domain admin is allowed to create network for users not in their domain ") except Exception as e: - self.debug("When Domain admin tries to create network for users in his sub domain %s" % e) + self.debug("When Domain admin tries to create network for users in their sub domain %s" % e) if not 
CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): - self.fail("Error message validation failed when Domain admin tries to create network for users not in his domain ") + self.fail("Error message validation failed when Domain admin tries to create network for users not in their domain ") ## Test cases relating to createNetwork as regular user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_08_createNetwork_user(self): """ - # Validate that Regular should be able to create network for himslef + # Validate that Regular should be able to create network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -549,12 +549,12 @@ def test_08_createNetwork_user(self): self.assertEqual(network.state.lower() == ALLOCATED.lower(), True, - "User is not able to create a network for himself") + "User is not able to create a network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_09_createNetwork_user_foruserinsamedomain(self): """ - # Validate that Regular user should NOT be able to create network for users in his domain + # Validate that Regular user should NOT be able to create network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -571,11 +571,11 @@ def test_09_createNetwork_user_foruserinsamedomain(self): domainid=self.account_d1b.domainid ) self.cleanup.append(network) - self.fail("User is allowed to create network for other users in his domain ") + self.fail("User is allowed to create network for other users in their domain ") except Exception as e: - self.debug("When user tries to create network for users in his domain %s" % e) + self.debug("When user tries to create network for users in their domain %s" % e) if not 
CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_LIST_NETWORK_ACCOUNT): - self.fail("Error message validation failed when when User tries to create network for other users in his domain ") + self.fail("Error message validation failed when when User tries to create network for other users in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_10_createNetwork_user_foruserinotherdomain(self): @@ -597,18 +597,18 @@ def test_10_createNetwork_user_foruserinotherdomain(self): domainid=self.account_d11a.domainid ) self.cleanup.append(network) - self.fail("User is allowed to create network for users not in his domain ") + self.fail("User is allowed to create network for users not in their domain ") except Exception as e: self.debug("When user tries to create network for users in other domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_LIST_NETWORK_ACCOUNT): - self.fail("Error message validation failed when User tries to create network for users not in his domain ") + self.fail("Error message validation failed when User tries to create network for users not in their domain ") ## Test cases relating to Deploying VM in a network as admin user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_11_deployvm_admin(self): """ - # Validate that Admin should be able to deploy VM in the networks he owns + # Validate that Admin should be able to deploy VM in the networks if it is self-owned """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -626,12 +626,12 @@ def test_11_deployvm_admin(self): self.assertEqual(vm.state.lower() == RUNNING.lower(), True, - "Admin User is not able to deploy VM in his own network") + "Admin User is not able to deploy VM in their own network") @attr("simulator_only", tags=["advanced"], required_hardware="false") def 
test_12_deployvm_admin_foruserinsamedomain(self): """ - # Validate that Admin should be able to deploy Vm for users in his domain + # Validate that Admin should be able to deploy Vm for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -650,12 +650,12 @@ def test_12_deployvm_admin_foruserinsamedomain(self): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account == self.account_roota.name and vm.domainid == self.account_roota.domainid, True, - "Admin User is not able to deploy VM for users in his domain") + "Admin User is not able to deploy VM for users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_13_deployvm_admin_foruserinotherdomain(self): """ - # Validate that Admin should be able to deploy VM for users in his sub domain + # Validate that Admin should be able to deploy VM for users in their sub domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -707,7 +707,7 @@ def test_13_1_deployvm_admin_foruserinotherdomain_crossnetwork(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_14_deployvm_domaindmin(self): """ - # Validate that Domain admin should be able to deploy vm for himslef + # Validate that Domain admin should be able to deploy vm with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -725,12 +725,12 @@ def test_14_deployvm_domaindmin(self): self.assertEqual(vm.state.lower() == RUNNING.lower(), True, - "Domain admin User is not able to deploy VM for himself") + "Domain admin User is not able to deploy VM with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_15_deployvm_domaindmin_foruserinsamedomain(self): """ - # 
Validate that Domain admin should be able to deploy vm for users in his domain + # Validate that Domain admin should be able to deploy vm for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -749,12 +749,12 @@ def test_15_deployvm_domaindmin_foruserinsamedomain(self): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account == self.account_d1a.name and vm.domainid == self.account_d1a.domainid, True, - "Domain admin User is not able to deploy VM for other users in his domain") + "Domain admin User is not able to deploy VM for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_16_deployvm_domaindmin_foruserinsubdomain(self): """ - # Validate that Domain admin should be able to deploy vm for users in his sub domain + # Validate that Domain admin should be able to deploy vm for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -773,12 +773,12 @@ def test_16_deployvm_domaindmin_foruserinsubdomain(self): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account == self.account_d11a.name and vm.domainid == self.account_d11a.domainid, True, - "Domain admin User is not able to deploy vm for himself") + "Domain admin User is not able to deploy vm with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_17_deployvm_domaindmin_forcrossdomainuser(self): """ - # Validate that Domain admin should not be able allowed to deploy vm for users not in his sub domain + # Validate that Domain admin should not be able allowed to deploy vm for users not in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -798,7 +798,7 @@ def 
test_17_deployvm_domaindmin_forcrossdomainuser(self): self.cleanup.append(vm) self.fail("Domain admin is allowed to deploy vm for users not in hos domain ") except Exception as e: - self.debug("When Domain admin tries to deploy vm for users in his sub domain %s" % e) + self.debug("When Domain admin tries to deploy vm for users in their sub domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): self.fail("Error message validation failed when Domain admin tries to deploy vm for users not in hos domain ") @@ -822,18 +822,18 @@ def test_17_1_deployvm_domainadmin_foruserinotherdomain_crossnetwork(self): domainid=self.account_d11a.domainid ) self.cleanup.append(vm) - self.fail("Domain admin is allowed to deploy vm for users in a network that does not belong to him ") + self.fail("Domain admin is allowed to deploy vm for users in a network that is not self-owned ") except Exception as e: self.debug("When domain admin tries to deploy vm for users in network that does not belong to the user %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_USE_NETWORK): - self.fail("Error message validation failed when Domain admin tries to deploy vm for users in a network that does not belong to him ") + self.fail("Error message validation failed when Domain admin tries to deploy vm for users in a network that is not self-owned ") ## Test cases relating to deploying VM as regular user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_18_deployvm_user(self): """ - # Validate that Regular should be able to deploy vm for himslef + # Validate that Regular should be able to deploy vm with self-ownership """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -850,12 +850,12 @@ def test_18_deployvm_user(self): self.cleanup.append(vm) self.assertEqual(vm.state.lower() == 
RUNNING.lower(), True, - "User is not able to deploy vm for himself") + "User is not able to deploy vm with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_19_deployvm_user_foruserinsamedomain(self): """ - # Validate that Regular user should NOT be able to deploy vm for users in his domain + # Validate that Regular user should NOT be able to deploy vm for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -873,16 +873,16 @@ def test_19_deployvm_user_foruserinsamedomain(self): domainid=self.account_d1b.domainid ) self.cleanup.append(vm) - self.fail("Regular user is allowed to deploy vm for other users in his domain ") + self.fail("Regular user is allowed to deploy vm for other users in their domain ") except Exception as e: - self.debug("When user tries to deploy vm for users in his domain %s" % e) + self.debug("When user tries to deploy vm for users in their domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to deploy vm for other users in his domain ") + self.fail("Error message validation failed when Regular user tries to deploy vm for other users in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_20_deployvm_user_foruserincrossdomain(self): """ - # Validate that Regular user should NOT be able to deploy vm for users in his domain + # Validate that Regular user should NOT be able to deploy vm for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -900,16 +900,16 @@ def test_20_deployvm_user_foruserincrossdomain(self): domainid=self.account_d2a.domainid ) self.cleanup.append(vm) - self.fail("Regular user is allowed to deploy vm for 
users not in his domain ") + self.fail("Regular user is allowed to deploy vm for users not in their domain ") except Exception as e: self.debug("When user tries to deploy vm for users n different domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to deploy vm for users not in his domain ") + self.fail("Error message validation failed when Regular user tries to deploy vm for users not in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_20_1_deployvm_user_incrossnetwork(self): """ - #Validate that User should not be able deploy VM in a network that does not belong to him + #Validate that User should not be able deploy VM in a network that is not self-owned """ self.apiclient.connection.apiKey = self.user_d11a_apikey self.apiclient.connection.securityKey = self.user_d11a_secretkey @@ -924,18 +924,18 @@ def test_20_1_deployvm_user_incrossnetwork(self): networkids=self.network_d11b.id, ) self.cleanup.append(vm) - self.fail("User is allowed to deploy VM in a network that does not belong to him ") + self.fail("User is allowed to deploy VM in a network that is not self-owned ") except Exception as e: self.debug("When user tries to deploy vm in a network that does not belong to him %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.UNABLE_TO_USE_NETWORK): - self.fail("Error message validation failed when User is allowed to deploy VM in a network that does not belong to him ") + self.fail("Error message validation failed when User is allowed to deploy VM in a network that is not self-owned ") ## Test cases relating to restart Network as admin user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_21_restartNetwork_admin(self): """ - #Validate that Admin should be able to restart network for networks he owns + #Validate that 
Admin should be able to restart network for networks that are self-owned """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -944,12 +944,12 @@ def test_21_restartNetwork_admin(self): self.assertEqual(restartResponse.success, True, - "Admin User is not able to restart network he owns") + "Admin User is not able to restart network if it is self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_22_restartNetwork_admin_foruserinsamedomain(self): """ - # Validate that Admin should be able to restart network for users in his domain + # Validate that Admin should be able to restart network for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -958,12 +958,12 @@ def test_22_restartNetwork_admin_foruserinsamedomain(self): self.assertEqual(restartResponse.success, True, - "Admin User is not able to restart network owned by users his domain") + "Admin User is not able to restart network owned by users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_23_restartNetwork_admin_foruserinotherdomain(self): """ - # Validate that Admin should be able to restart network for users in his sub domain + # Validate that Admin should be able to restart network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -980,7 +980,7 @@ def test_23_restartNetwork_admin_foruserinotherdomain(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_24_restartNetwork_domaindmin(self): """ - # Validate that Domain admin should be able to restart network for himslef + # Validate that Domain admin should be able to restart network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -989,12 +989,12 @@ def test_24_restartNetwork_domaindmin(self): self.assertEqual(restartResponse.success, True, - "Domain admin User is not able to restart network for himself") + "Domain admin User is not able to restart network with self-ownership") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_25_restartNetwork_domaindmin_foruserinsamedomain(self): """ - # Validate that Domain admin should be able to restart network for users in his domain + # Validate that Domain admin should be able to restart network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -1002,12 +1002,12 @@ def test_25_restartNetwork_domaindmin_foruserinsamedomain(self): restartResponse = self.network_d1a.restart(self.apiclient) self.assertEqual(restartResponse.success, True, - "Domain admin User is not able to restart network for other users in his domain") + "Domain admin User is not able to restart network for other users in their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_26_restartNetwork_domaindmin_foruserinsubdomain(self): """ - # Validate that Domain admin should be able to restart network for users in his sub domain + # Validate that Domain admin should be able to restart network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -1015,30 +1015,30 @@ def test_26_restartNetwork_domaindmin_foruserinsubdomain(self): restartResponse = self.network_d11a.restart(self.apiclient) self.assertEqual(restartResponse.success, True, - "Domain admin User is not able to restart network he owns") + "Domain admin User is not able to restart network if it is self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def 
test_27_restartNetwork_domaindmin_forcrossdomainuser(self): """ - # Validate that Domain admin should be able to restart network for users in his sub domain + # Validate that Domain admin should be able to restart network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey try: restartResponse = self.network_d2a.restart(self.apiclient) - self.fail("Domain admin is allowed to restart network for users not in his domain ") + self.fail("Domain admin is allowed to restart network for users not in their domain ") except Exception as e: - self.debug("When Domain admin tries to restart network for users in his sub domain %s" % e) + self.debug("When Domain admin tries to restart network for users in their sub domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): - self.fail("Error message validation failed when Domain admin tries to restart network for users not in his domain ") + self.fail("Error message validation failed when Domain admin tries to restart network for users not in their domain ") ## Test cases relating restart network as regular user @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_28_restartNetwork_user(self): """ - #Validate that Regular should be able to restart network for himslef + #Validate that Regular should be able to restart network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey @@ -1046,23 +1046,23 @@ def test_28_restartNetwork_user(self): restartResponse = self.network_d1a.restart(self.apiclient) self.assertEqual(restartResponse.success, True, - "User is not able to restart network he owns") + "User is not able to restart network if it is self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def 
test_29_restartNetwork_user_foruserinsamedomain(self): """ - #Validate that Regular user should NOT be able to restart network for users in his domain + #Validate that Regular user should NOT be able to restart network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1a_apikey self.apiclient.connection.securityKey = self.user_d1a_secretkey try: restartResponse = self.network_d1b.restart(self.apiclient) - self.fail("Regular user is allowed to restart network for users in his domain ") + self.fail("Regular user is allowed to restart network for users in their domain ") except Exception as e: - self.debug("When user tries to restart network for users in his domain %s" % e) + self.debug("When user tries to restart network for users in their domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to restart network for users in his domain ") + self.fail("Error message validation failed when Regular user tries to restart network for users in their domain ") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_30_restartNetwork_user_foruserinotherdomain(self): @@ -1074,11 +1074,11 @@ def test_30_restartNetwork_user_foruserinotherdomain(self): try: restartResponse = self.network_d11a.restart(self.apiclient) - self.fail("Regular user is allowed to restart network for users not in his domain ") + self.fail("Regular user is allowed to restart network for users not in their domain ") except Exception as e: self.debug("When user tries to restart network for users in other domain %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user is allowed to restart network for users not in his domain ") + self.fail("Error message validation failed when Regular user is 
allowed to restart network for users not in their domain ") @staticmethod def generateKeysForUser(apiclient, account): diff --git a/test/integration/component/test_acl_isolatednetwork_delete.py b/test/integration/component/test_acl_isolatednetwork_delete.py index 379ed3d6e966..bf464d6d6d91 100644 --- a/test/integration/component/test_acl_isolatednetwork_delete.py +++ b/test/integration/component/test_acl_isolatednetwork_delete.py @@ -348,7 +348,7 @@ def tearDown(cls): @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_admin(self): """ - Validate that Admin should be able to delete network he owns + Validate that Admin should be able to delete network that is self-owned """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -358,14 +358,14 @@ def test_deleteNetwork_admin(self): self.assertEqual(response, None, - "Admin User is not able to restart network he owns") + "Admin User is not able to delete network that is self-owned") self._cleanup.remove(self.network_root) @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_admin_foruserinsamedomain(self): """ - Validate that Admin should be able to delete network for users in his domain + Validate that Admin should be able to delete network for users in their domain """ self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -375,12 +375,12 @@ def test_deleteNetwork_admin_foruserinsamedomain(self): self.assertEqual(response, None, - "Admin User is not able to delete network owned by users his domain") + "Admin User is not able to delete network owned by users in their domain") self._cleanup.remove(self.network_roota) @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_admin_foruserinotherdomain(self): - # Validate that Admin should be able to delete network for
users in his sub domain + # Validate that Admin should be able to delete network for users in their sub domain self.apiclient.connection.apiKey = self.user_root_apikey self.apiclient.connection.securityKey = self.user_root_secretkey @@ -397,7 +397,7 @@ def test_deleteNetwork_admin_foruserinotherdomain(self): @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_domaindmin(self): """ - Validate that Domain admin should be able to delete network for himslef + Validate that Domain admin should be able to delete network with self-ownership """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -407,13 +407,13 @@ def test_deleteNetwork_domaindmin(self): self.assertEqual(response, None, - "Domain admin User is not able to delete a network he owns") + "Domain admin User is not able to delete a network that is self-owned") self._cleanup.remove(self.network_d1) @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_domaindmin_foruserinsamedomain(self): """ - Validate that Domain admin should be able to delete network for users in his domain + Validate that Domain admin should be able to delete network for users in their domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -428,7 +428,7 @@ def test_deleteNetwork_domaindmin_foruserinsamedomain(self): @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_domaindmin_foruserinsubdomain(self): """ - Validate that Domain admin should be able to delete network for users in his sub domain + Validate that Domain admin should be able to delete network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -444,7 +444,7 @@ def test_deleteNetwork_domaindmin_foruserinsubdomain(self): 
@attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_domaindmin_forcrossdomainuser(self): """ - Validate that Domain admin should be able to delete network for users in his sub domain + Validate that Domain admin should be able to delete network for users in their sub domain """ self.apiclient.connection.apiKey = self.user_d1_apikey self.apiclient.connection.securityKey = self.user_d1_secretkey @@ -452,18 +452,18 @@ def test_deleteNetwork_domaindmin_forcrossdomainuser(self): try: response = self.network_d2a.delete(self.apiclient) self._cleanup.remove(self.network_d2a) - self.fail("Domain admin is allowed to delete network for users not in his domain ") + self.fail("Domain admin is allowed to delete network for users not in their domain ") except Exception as e: self.debug ("When Domain admin tries to delete network for user in a different domain %s" %e) if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): - self.fail("Error message validation failed when Domain admin tries to delete network for users not in his domain ") + self.fail("Error message validation failed when Domain admin tries to delete network for users not in their domain ") ## Test cases relating deleting network as regular user @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_user(self): """ - Validate that Regular should be able to delete network for himslef + Validate that Regular should be able to delete network with self-ownership """ self.apiclient.connection.apiKey = self.user_d111a_apikey self.apiclient.connection.securityKey = self.user_d111a_secretkey @@ -473,13 +473,13 @@ def test_deleteNetwork_user(self): self.assertEqual(response, None, - "User is not able to delete a network he owns") + "User is not able to delete a network that is self-owned") self._cleanup.remove(self.network_d111a) @attr("simulator_only",tags=["advanced"],required_hardware="false") 
def test_deleteNetwork_user_foruserinsamedomain(self): """ - Validate that Regular user should NOT be able to delete network for users in his domain + Validate that Regular user should NOT be able to delete network for users in their domain """ self.apiclient.connection.apiKey = self.user_d111a_apikey self.apiclient.connection.securityKey = self.user_d111a_secretkey @@ -487,11 +487,11 @@ def test_deleteNetwork_user_foruserinsamedomain(self): try: response = self.network_d111b.delete(self.apiclient) self._cleanup.remove(self.network_d111b) - self.fail("Regular user is allowed to delete network for users in his domain ") + self.fail("Regular user is allowed to delete network for users in their domain ") except Exception as e: - self.debug ("When user tries to delete network for users in his domain %s" %e) + self.debug ("When user tries to delete network for users in their domain %s" %e) if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Regular user is allowed to delete network for users in his domain ") + self.fail("Regular user is allowed to delete network for users in their domain ") @attr("simulator_only",tags=["advanced"],required_hardware="false") def test_deleteNetwork_user_foruserinotherdomain(self): @@ -505,11 +505,11 @@ def test_deleteNetwork_user_foruserinotherdomain(self): try: response = self.network_d11b.delete(self.apiclient) self._cleanup.remove(self.network_d11b) - self.fail("Regular user is allowed to delete network for users not in his domain ") + self.fail("Regular user is allowed to delete network for users not in their domain ") except Exception as e: self.debug ("When user tries to delete network for users in other domain %s" %e) if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT): - self.fail("Error message validation failed when Regular user tries to delete network for users not in his domain ") + self.fail("Error 
message validation failed when Regular user tries to delete network for users not in their domain ") @staticmethod def generateKeysForUser(apiclient,account): diff --git a/test/integration/component/test_acl_listsnapshot.py b/test/integration/component/test_acl_listsnapshot.py index be280b4f223f..8bf674ba6882 100644 --- a/test/integration/component/test_acl_listsnapshot.py +++ b/test/integration/component/test_acl_listsnapshot.py @@ -2623,7 +2623,7 @@ def test_listSnapshot_as_domainadmin_cross_domainid(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_owns(self): """ - Domain admin should be able to list Snapshots that he owns by passing uuid in "id" parameter + Domain admin should be able to list Snapshots that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2632,16 +2632,16 @@ def test_listSnapshot_by_id_as_domainadmin_owns(self): self.assertNotEqual(SnapshotList, None, - "Domain Admin is not able to list Snapshotss that he owns") + "Domain Admin is not able to list Snapshots that are self-owned") self.assertEqual(len(SnapshotList), 1, - "Domain Admin is not able to list Snapshotss that belongs to him") + "Domain Admin is not able to list Snapshots that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_ownedbyusersindomain(self): """ - Domain admin should be able to list Snapshots that is owned by any account in his domain by passing uuid in "id" parameter + Domain admin should be able to list Snapshots that is owned by any account in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2650,16 +2650,16 @@ def test_listSnapshot_by_id_as_domainadmin_ownedbyusersindomain(self): self.assertNotEqual(SnapshotList1, None, - "Domain Admin is not able to list Snapshotss from his domain") + "Domain Admin 
is not able to list Snapshots from their domain") self.assertEqual(len(SnapshotList1), 1, - "Domain Admin is not able to list Snapshotss from his domain") + "Domain Admin is not able to list Snapshots from their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain(self): """ - Domain admin should be able to list Snapshots that is owned by any account in his sub-domain by passing uuid in "id" parameter + Domain admin should be able to list Snapshots that is owned by any account in their sub-domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2668,16 +2668,16 @@ def test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain(self): self.assertNotEqual(SnapshotList2, None, - "Domain Admin is not able to list Snapshotss from his sub domain") + "Domain Admin is not able to list Snapshots from their sub domain") self.assertEqual(len(SnapshotList2), 1, - "Domain Admin is not able to list Snapshotss from his sub domain") + "Domain Admin is not able to list Snapshots from their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_domainadmin_ownedbyusersnotindomain(self): """ - Domain admin should not be able to list Snapshots that is owned by account that is not in his domain by passing uuid in "id" parameter + Domain admin should not be able to list Snapshots that is owned by account that is not in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2686,12 +2686,12 @@ def test_listSnapshot_by_id_as_domainadmin_ownedbyusersnotindomain(self): self.assertEqual(SnapshotList3, None, - "Domain Admin is able to list Snapshotss from other domains!!!") + "Domain Admin is able to list Snapshots from other domains!!!") @attr("simulator_only", tags=["advanced"], required_hardware="false") def 
test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): """ - Domain admin should be able to list Snapshots that is owned by account that is in his sub domains by passing uuid in "id" parameter + Domain admin should be able to list Snapshots that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2700,16 +2700,16 @@ def test_listSnapshot_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): self.assertNotEqual(SnapshotList4, None, - "Domain Admin is not able to list Snapshotss from his subdomain") + "Domain Admin is not able to list Snapshots from their subdomain") self.assertEqual(len(SnapshotList4), 1, - "Domain Admin is not able to list Snapshotss from his sub domains") + "Domain Admin is not able to list Snapshots from their sub domains") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_rootadmin_owns(self): """ - ROOT admin should be able to list Snapshots that is owned by account in his domains by passing uuid in "id" parameter + ROOT admin should be able to list Snapshots that is owned by account in their domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_a_apikey @@ -2717,10 +2717,10 @@ def test_listSnapshot_by_id_as_rootadmin_owns(self): SnapshotList1 = Snapshot.list(self.apiclient, id=self.vm_a_snapshot.id) self.assertNotEqual(SnapshotList1, None, - "ROOT Admin not able to list Snapshotss that he owns") + "ROOT Admin not able to list Snapshots that are self-owned") self.assertEqual(len(SnapshotList1), 1, - "ROOT Admin not able to list Snapshotss that he owns") + "ROOT Admin not able to list Snapshots that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_rootadmin_Snapshotsownedbyothers(self): @@ -2734,22 +2734,22 @@ def test_listSnapshot_by_id_as_rootadmin_Snapshotsownedbyothers(self): 
SnapshotList2 = Snapshot.list(self.apiclient, id=self.vm_d11a_snapshot.id) self.assertNotEqual(SnapshotList1, None, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") self.assertNotEqual(SnapshotList2, None, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") self.assertEqual(len(SnapshotList1), 1, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") self.assertEqual(len(SnapshotList2), 1, - "ROOT Admin not able to list Snapshotss from other domains") + "ROOT Admin not able to list Snapshots from other domains") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_user_own(self): """ - Regular user should be able to list Snapshots that is owned by him by passing uuid in "id" parameter + Regular user should be able to list Snapshots that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d11a_apikey @@ -2758,11 +2758,11 @@ def test_listSnapshot_by_id_as_user_own(self): self.assertNotEqual(SnapshotList1, None, - "Regular User is not able to list Snapshotss that he owns") + "Regular User is not able to list Snapshots that are self-owned") self.assertEqual(len(SnapshotList1), 1, - "Regular User is not able to list Snapshotss that he owns") + "Regular User is not able to list Snapshots that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listSnapshot_by_id_as_user_snapshotfromsamedomaindifferentaccount(self): diff --git a/test/integration/component/test_acl_listvm.py b/test/integration/component/test_acl_listvm.py index 91d25a8ef039..b49461617c03 100644 --- a/test/integration/component/test_acl_listvm.py +++ b/test/integration/component/test_acl_listvm.py @@ -2597,7 +2597,7 @@ def 
test_listVM_as_domainadmin_cross_domainid(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_owns(self): """ - # Domain admin should be able to list Vm that he owns by passing uuid in "id" parameter + # Domain admin should be able to list Vms that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2606,16 +2606,16 @@ def test_listVM_by_id_as_domainadmin_owns(self): self.assertNotEqual(VMList, None, - "Domain Admin is not able to list Vms that he owns") + "Domain Admin is not able to list Vms that are self-owned") self.assertEqual(len(VMList), 1, - "Domain Admin is not able to list Vms that belongs to him") + "Domain Admin is not able to list Vms that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersindomain(self): """ - # Domain admin should be able to list Vm that is owned by any account in his domain by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by any account in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2624,16 +2624,16 @@ def test_listVM_by_id_as_domainadmin_ownedbyusersindomain(self): self.assertNotEqual(VMList1, None, - "Domain Admin is not able to list Vms from his domain") + "Domain Admin is not able to list Vms from their domain") self.assertEqual(len(VMList1), 1, - "Domain Admin is not able to list Vms from his domain") + "Domain Admin is not able to list Vms from their domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain(self): """ - # Domain admin should be able to list Vm that is owned by any account in his sub-domain by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by any account in their sub-domain by 
passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2642,16 +2642,16 @@ def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain(self): self.assertNotEqual(VMList2, None, - "Domain Admin is not able to list Vms from his sub domain") + "Domain Admin is not able to list Vms from their sub domain") self.assertEqual(len(VMList2), 1, - "Domain Admin is not able to list Vms from his sub domain") + "Domain Admin is not able to list Vms from their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersnotindomain(self): """ - # Domain admin should not be able to list Vm that is owned by account that is not in his domain by passing uuid in "id" parameter + # Domain admin should not be able to list Vm that is owned by account that is not in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2665,7 +2665,7 @@ def test_listVM_by_id_as_domainadmin_ownedbyusersnotindomain(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): """ - # Domain admin should be able to list Vm that is owned by account that is in his sub domains by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2674,16 +2674,16 @@ def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): self.assertNotEqual(VMList4, None, - "Domain Admin is not able to list Vms from his subdomain") + "Domain Admin is not able to list Vms from their sub domains") self.assertEqual(len(VMList4), 1, - "Domain Admin is not able to list Vms from his sub domains") + "Domain Admin is not able to list Vms from their sub domains") @attr("simulator_only", tags=["advanced"], 
required_hardware="false") def test_listVM_by_id_as_rootadmin_owns(self): """ - # Domain admin should be able to list Vm that is owned by account that is in his sub domains by passing uuid in "id" parameter + # Domain admin should be able to list Vm that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_a_apikey @@ -2691,10 +2691,10 @@ def test_listVM_by_id_as_rootadmin_owns(self): VMList1 = VirtualMachine.list(self.apiclient, id=self.vm_a.id) self.assertNotEqual(VMList1, None, - "ROOT Admin not able to list Vms that he owns") + "ROOT Admin not able to list Vms that are self-owned") self.assertEqual(len(VMList1), 1, - "ROOT Admin not able to list Vms that he owns") + "ROOT Admin not able to list Vms that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_rootadmin_Vmsownedbyothers(self): @@ -2723,7 +2723,7 @@ def test_listVM_by_id_as_rootadmin_Vmsownedbyothers(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_user_own(self): """ - # Regular user should be able to list Vm that is owned by him by passing uuid in "id" parameter + # Regular user should be able to list Vms that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d11a_apikey @@ -2732,11 +2732,11 @@ def test_listVM_by_id_as_user_own(self): self.assertNotEqual(VMList1, None, - "Regular User is not able to list Vms that he owns") + "Regular User is not able to list Vms that are self-owned") self.assertEqual(len(VMList1), 1, - "Regular User is not able to list Vms that he owns") + "Regular User is not able to list Vms that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVM_by_id_as_user_vmfromsamedomaindifferentaccount(self): diff --git a/test/integration/component/test_acl_listvolume.py 
b/test/integration/component/test_acl_listvolume.py index 463bba310d24..be79da8e529c 100644 --- a/test/integration/component/test_acl_listvolume.py +++ b/test/integration/component/test_acl_listvolume.py @@ -2603,7 +2603,7 @@ def test_listVolume_as_domainadmin_cross_domainid(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_owns(self): """ - # Domain admin should be able to list Volumes that he owns by passing uuid in "id" parameter + # Domain admin should be able to list Volumes that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2612,16 +2612,16 @@ def test_listVolume_by_id_as_domainadmin_owns(self): self.assertNotEqual(VMList, None, - "Domain Admin is not able to list Volumes that he owns") + "Domain Admin is not able to list Volumes that are self-owned") self.assertEqual(len(VMList), 1, - "Domain Admin is not able to list Volumes that belongs to him") + "Domain Admin is not able to list Volumes that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersindomain(self): """ - # Domain admin should be able to list Volumes that is owned by any account in his domain by passing uuid in "id" parameter + # Domain admin should be able to list Volumes that is owned by any account in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2630,16 +2630,16 @@ def test_listVolume_by_id_as_domainadmin_ownedbyusersindomain(self): self.assertNotEqual(VMList1, None, - "Domain Admin is not able to list Volumes from his domain") + "Domain Admin is not able to list Volumes from their domain") self.assertEqual(len(VMList1), 1, - "Domain Admin is not able to list Volumes from his domain") + "Domain Admin is not able to list Volumes from their domain") @attr("simulator_only", tags=["advanced"], 
required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersinsubdomain(self): """ - # Domain admin should be able to list Volumes that is owned by any account in his sub-domain by passing uuid in "id" parameter + # Domain admin should be able to list Volumes that is owned by any account in their sub-domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2648,16 +2648,16 @@ def test_listVolume_by_id_as_domainadmin_ownedbyusersinsubdomain(self): self.assertNotEqual(VMList2, None, - "Domain Admin is not able to list Volumes from his sub domain") + "Domain Admin is not able to list Volumes from their sub domain") self.assertEqual(len(VMList2), 1, - "Domain Admin is not able to list Volumes from his sub domain") + "Domain Admin is not able to list Volumes from their sub domain") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersnotindomain(self): """ - # Domain admin should not be able to list Volumes that is owned by account that is not in his domain by passing uuid in "id" parameter + # Domain admin should not be able to list Volumes that is owned by account that is not in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2671,7 +2671,7 @@ def test_listVolume_by_id_as_domainadmin_ownedbyusersnotindomain(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): """ - # Domain admin should be able to list Volumes that is owned by account that is in his sub domains by passing uuid in "id" parameter + # Domain admin should be able to list Volumes that is owned by account that is in their sub domains by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d1_apikey @@ -2680,16 +2680,16 @@ def 
test_listVolume_by_id_as_domainadmin_ownedbyusersinsubdomain2(self): self.assertNotEqual(VMList4, None, - "Domain Admin is not able to list Volumes from his subdomain") + "Domain Admin is not able to list Volumes from their subdomain") self.assertEqual(len(VMList4), 1, - "Domain Admin is not able to list Volumes from his sub domains") + "Domain Admin is not able to list Volumes from their sub domains") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_rootadmin_owns(self): """ - # ROOT admin should be able to list Volumes that is owned by accounts in his domain by passing uuid in "id" parameter + # ROOT admin should be able to list Volumes that is owned by accounts in their domain by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_a_apikey @@ -2697,10 +2697,10 @@ def test_listVolume_by_id_as_rootadmin_owns(self): VMList1 = Volume.list(self.apiclient, id=self.vm_a_volume[0].id) self.assertNotEqual(VMList1, None, - "ROOT Admin not able to list Volumes that he owns") + "ROOT Admin not able to list Volumes that are self-owned") self.assertEqual(len(VMList1), 1, - "ROOT Admin not able to list Volumes that he owns") + "ROOT Admin not able to list Volumes that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_rootadmin_Volumesownedbyothers(self): @@ -2729,7 +2729,7 @@ def test_listVolume_by_id_as_rootadmin_Volumesownedbyothers(self): @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_user_own(self): """ - # Regular user should be able to list Volumes that is owned by him by passing uuid in "id" parameter + # Regular user should be able to list Volumes that are self-owned by passing uuid in "id" parameter """ self.apiclient.connection.apiKey = self.user_d11a_apikey @@ -2738,11 +2738,11 @@ def test_listVolume_by_id_as_user_own(self): self.assertNotEqual(VMList1, None, - "Regular 
User is not able to list Volumes that he owns") + "Regular User is not able to list Volumes that are self-owned") self.assertEqual(len(VMList1), 1, - "Regular User is not able to list Volumes that he owns") + "Regular User is not able to list Volumes that are self-owned") @attr("simulator_only", tags=["advanced"], required_hardware="false") def test_listVolume_by_id_as_user_volumefromsamedomaindifferentaccount(self): diff --git a/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py b/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py index d59ccb6165d7..2bd62dc44700 100644 --- a/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py +++ b/test/integration/component/test_acl_sharednetwork_deployVM-impersonation.py @@ -1196,12 +1196,12 @@ def test_deployVM_in_sharedNetwork_as_domainadmin_scope_all_crossdomainuser(self accountid=self.account_d2a.name, domainid=self.account_d2a.domainid ) - self.fail("Domain admin user is able to Deploy VM for a domain user he does not have access to in a shared network with scope=domain with no subdomain access ") + self.fail("Domain admin user is able to Deploy VM for a domain user, but there is no access to in a shared network with scope=domain with no subdomain access ") except Exception as e: - self.debug("When a Domain admin user deploys a VM for a domain user he does not have access to in a shared network with scope=domain with no subdomain access %s" % e) + self.debug("When a Domain admin user deploys a VM for a domain user, but there is no access to in a shared network with scope=domain with no subdomain access %s" % e) if not CloudstackAclException.verifyMsginException(e, CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN): self.fail( - "Error mesage validation failed when Domain admin user tries to Deploy VM for a domain user he does not have access to in a shared network with scope=domain with no subdomain access ") + "Error mesage validation failed 
when Domain admin user tries to Deploy VM for a domain user, but there is no access to in a shared network with scope=domain with no subdomain access ") ## Test cases relating to deploying Virtual Machine as Domain admin for other users in shared network with scope=Domain and no subdomain access diff --git a/test/integration/component/test_add_remove_network.py b/test/integration/component/test_add_remove_network.py index 91baa3fdb31e..ac0ecc7c57b8 100644 --- a/test/integration/component/test_add_remove_network.py +++ b/test/integration/component/test_add_remove_network.py @@ -309,7 +309,7 @@ def addNetworkToVm(self, network, vm, ipaddress=None): self.debug("Filtered nics list: %s:" % nics) # Only the nics added to self.virtual_machine should be added to this list - # Nics added to his list are removed before execution of next test case because we are using + # Nics added to their list are removed before execution of next test case because we are using # same virtual machine in all test cases, so it is important that the common # virtual machine should contain only the default nic whenever new test case # execution starts diff --git a/test/integration/component/test_affinity_groups.py b/test/integration/component/test_affinity_groups.py index 445364877a7c..9d4c486b33af 100644 --- a/test/integration/component/test_affinity_groups.py +++ b/test/integration/component/test_affinity_groups.py @@ -1648,7 +1648,7 @@ def test_04_list_all_admin_aff_grp(self): list_aff_grps = AffinityGroup.list(self.api_client) self.assertNotEqual(list_aff_grps, [], "Admin not able to list Affinity " - "Groups belonging to him") + "Groups are owned by the admin") grp_names = [aff_grp1.name, aff_grp2.name] list_names = [] for grp in list_aff_grps: diff --git a/test/integration/component/test_egress_fw_rules.py b/test/integration/component/test_egress_fw_rules.py index fc64cc6a3df3..e1b33bfa5eb0 100644 --- a/test/integration/component/test_egress_fw_rules.py +++ 
b/test/integration/component/test_egress_fw_rules.py @@ -848,7 +848,7 @@ def test_12_1_egress_fr12(self): @attr(tags=["advanced"], required_hardware="true") def test_13_egress_fr13(self): - """Test Redundant Router : Master failover + """Test Redundant Router : Primary failover """ # Validate the following: # 1. deploy VM using network offering with egress policy true. @@ -865,36 +865,36 @@ def test_13_egress_fr13(self): listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router should return Primary and backup routers") self.assertEqual(len(routers), 2, - "Length of the list router should be 2 (Backup & master)") + "Length of the list router should be 2 (Backup & primary)") - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Redundant states: %s, %s" % (master_router.redundantstate, + self.debug("Redundant states: %s, %s" % (primary_router.redundantstate, backup_router.redundantstate)) - self.debug("Stopping the Master router") + self.debug("Stopping the Primary router") try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR update state time.sleep(60) - self.debug("Checking state of the master router in %s" % self.network.name) + self.debug("Checking state of the primary router in %s" % self.network.name) routers = Router.list(self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router should return Primary and backup routers") 
self.exec_script_on_user_vm('ping -c 1 www.google.com', "| grep -oP \'\d+(?=% packet loss)\'", @@ -903,7 +903,7 @@ def test_13_egress_fr13(self): @attr(tags=["advanced"], required_hardware="true") def test_13_1_egress_fr13(self): - """Test Redundant Router : Master failover + """Test Redundant Router : Primary failover """ # Validate the following: # 1. deploy VM using network offering with egress policy false. @@ -920,36 +920,36 @@ def test_13_1_egress_fr13(self): listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router should return Primary and backup routers") self.assertEqual(len(routers), 2, - "Length of the list router should be 2 (Backup & master)") + "Length of the list router should be 2 (Backup & primary)") - if routers[0].redundantstate == 'MASTER': - master_router = routers[0] + if routers[0].redundantstate == 'PRIMARY': + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Redundant states: %s, %s" % (master_router.redundantstate, + self.debug("Redundant states: %s, %s" % (primary_router.redundantstate, backup_router.redundantstate)) - self.debug("Stopping the Master router") + self.debug("Stopping the Primary router") try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router: %s" % e) + self.fail("Failed to stop primary router: %s" % e) # wait for VR update state time.sleep(60) - self.debug("Checking state of the master router in %s" % self.network.name) + self.debug("Checking state of the primary router in %s" % self.network.name) routers = Router.list(self.apiclient, - id=master_router.id, + id=primary_router.id, listall=True) self.assertEqual(isinstance(routers, list), True, - "list router should return Master and backup routers") + "list router 
should return Primary and backup routers") self.exec_script_on_user_vm('ping -c 1 www.google.com', "| grep -oP \'\d+(?=% packet loss)\'", diff --git a/test/integration/component/test_ip_reservation.py b/test/integration/component/test_ip_reservation.py index c5341516f3a0..7eaaec46cc6b 100644 --- a/test/integration/component/test_ip_reservation.py +++ b/test/integration/component/test_ip_reservation.py @@ -38,7 +38,7 @@ createEnabledNetworkOffering, createNetworkRulesForVM, verifyNetworkState) -from marvin.codes import (PASS, FAIL, FAILED, UNKNOWN, FAULT, MASTER, +from marvin.codes import (PASS, FAIL, FAILED, UNKNOWN, FAULT, PRIMARY, NAT_RULE, STATIC_NAT_RULE) import netaddr @@ -375,7 +375,7 @@ def test_RVR_network(self): # steps # 1. create vm in isolated network with RVR and ip in guestvmcidr # 2. update guestvmcidr - # 3. List routers and stop the master router, wait till backup router comes up + # 3. List routers and stop the primary router, wait till backup router comes up # 4. create another VM # # validation @@ -383,7 +383,7 @@ def test_RVR_network(self): # 2. Existing guest vm ip should not be changed after reservation # 3. Newly created VM should get ip in guestvmcidr # 4. Verify that the network has two routers associated with it - # 5. Backup router should come up when master router is stopped""" + # 5. 
Backup router should come up when primary router is stopped""" subnet = "10.1."+str(random.randrange(1,254)) gateway = subnet +".1" @@ -413,29 +413,29 @@ def test_RVR_network(self): self.debug("Listing routers for network: %s" % isolated_network_RVR.name) routers = Router.list(self.apiclient, networkid=isolated_network_RVR.id, listall=True) self.assertEqual(validateList(routers)[0], PASS, "Routers list validation failed") - self.assertEqual(len(routers), 2, "Length of the list router should be 2 (Backup & master)") + self.assertEqual(len(routers), 2, "Length of the list router should be 2 (Backup & primary)") - if routers[0].redundantstate == MASTER: - master_router = routers[0] + if routers[0].redundantstate == PRIMARY: + primary_router = routers[0] backup_router = routers[1] else: - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] - self.debug("Stopping router ID: %s" % master_router.id) + self.debug("Stopping router ID: %s" % primary_router.id) try: - Router.stop(self.apiclient, id=master_router.id) + Router.stop(self.apiclient, id=primary_router.id) except Exception as e: - self.fail("Failed to stop master router due to error %s" % e) + self.fail("Failed to stop primary router due to error %s" % e) # wait for VR to update state wait_for_cleanup(self.apiclient, ["router.check.interval"]) - result = verifyRouterState(master_router.id, [UNKNOWN,FAULT]) + result = verifyRouterState(primary_router.id, [UNKNOWN,FAULT]) if result[0] == FAIL: self.fail(result[1]) - result = verifyRouterState(backup_router.id, [MASTER]) + result = verifyRouterState(backup_router.id, [PRIMARY]) if result[0] == FAIL: self.fail(result[1]) diff --git a/test/integration/component/test_multiple_subnets_in_isolated_network.py b/test/integration/component/test_multiple_subnets_in_isolated_network.py index 80334955a816..278728b2adbb 100644 --- a/test/integration/component/test_multiple_subnets_in_isolated_network.py +++ 
b/test/integration/component/test_multiple_subnets_in_isolated_network.py @@ -209,7 +209,7 @@ def verify_router_publicnic_state(self, router, host, publicNics): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py b/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py index 0565b98d8e45..d83571f9210e 100644 --- a/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py +++ b/test/integration/component/test_multiple_subnets_in_isolated_network_rvr.py @@ -209,7 +209,7 @@ def verify_router_publicnic_state(self, router, host, publicNics): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_multiple_subnets_in_vpc.py b/test/integration/component/test_multiple_subnets_in_vpc.py index f20f7c4a8bc3..9167f156e19f 100644 --- a/test/integration/component/test_multiple_subnets_in_vpc.py +++ b/test/integration/component/test_multiple_subnets_in_vpc.py @@ -213,7 +213,7 @@ def verify_router_publicnic_state(self, router, host, publicNics): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip 
link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_multiple_subnets_in_vpc_rvr.py b/test/integration/component/test_multiple_subnets_in_vpc_rvr.py index ca7731472b61..7c7127307b5c 100644 --- a/test/integration/component/test_multiple_subnets_in_vpc_rvr.py +++ b/test/integration/component/test_multiple_subnets_in_vpc_rvr.py @@ -213,7 +213,7 @@ def verify_router_publicnic_state(self, router, host, publicNics): if redundant_state == "FAULT": self.logger.debug("Skip as redundant_state is %s" % redundant_state) return - elif redundant_state == "MASTER": + elif redundant_state == "PRIMARY": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state DOWN" |wc -l' % publicNics elif redundant_state == "BACKUP": command = 'ip link show |grep BROADCAST | egrep "%s" |grep "state UP" |wc -l' % publicNics diff --git a/test/integration/component/test_persistent_networks.py b/test/integration/component/test_persistent_networks.py index 97b868802d61..079677d4ffe6 100644 --- a/test/integration/component/test_persistent_networks.py +++ b/test/integration/component/test_persistent_networks.py @@ -879,7 +879,7 @@ def test_persistent_network_with_RVR(self): # 1. create account and isolated network with network # offering which has ispersistent field enabled # and supporting Redundant Virtual Router in it - # 2. Check the Master and Backup Routers are present + # 2. Check the Primary and Backup Routers are present # 3. Deploy VM ,acquire IP, create Firewall, NAT rules # 4. 
Verify the working of NAT, Firewall rules # @@ -928,7 +928,7 @@ def test_persistent_network_with_RVR(self): self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)") + "Length of the list router should be 2 (Backup & Primary)") # Check if routers are reachable from the host for router in routers: diff --git a/test/integration/component/test_public_ip.py b/test/integration/component/test_public_ip.py index a37226f32fc2..f131d0b71ab0 100644 --- a/test/integration/component/test_public_ip.py +++ b/test/integration/component/test_public_ip.py @@ -473,7 +473,7 @@ def test_02_list_publicip_domain_admin(self): def test_03_list_publicip_user_domain(self): """ A regular user should be able to display public ip address - only in his domain + only in their domain Step 1: Create an isolated network in the user domain Step 2: Display all public ip address in that domain @@ -511,7 +511,7 @@ def test_03_list_publicip_user_domain(self): listall=True, forvirtualnetwork=True) - # Step 3: Ensure that sub domain can list only the ip address in his domain + # Step 3: Ensure that sub domain can list only the ip address in their domain self.assertEqual( len(ipAddresses), 10, @@ -610,7 +610,7 @@ def test_03_list_publicip_user_domain(self): def test_04_list_publicip_all_subdomains(self): """ A domain admin should be able to display public ip address - in his domain and also all child domains + in their domain and also all child domains Step 1: Display all public ip address in that domain and sub domain Step 2: Ensure that the count is 11 (all ip from parent domain and allocated from sub domain) @@ -677,7 +677,7 @@ def test_04_list_publicip_all_subdomains(self): def test_05_list_publicip_user_domain(self): """ A domain admin should be able to display public ip address - in his domain and also all child domains + in their domain and also all child domains Step 1: Display all public ip address in that domain and sub domain Step 2: Ensure that the count is 
20 diff --git a/test/integration/component/test_redundant_router_cleanups.py b/test/integration/component/test_redundant_router_cleanups.py index 34b1fb3bef97..2c0805f9fba6 100644 --- a/test/integration/component/test_redundant_router_cleanups.py +++ b/test/integration/component/test_redundant_router_cleanups.py @@ -306,12 +306,12 @@ def test_restart_ntwk_no_cleanup(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("restarting network with cleanup=False") @@ -329,12 +329,12 @@ def test_restart_ntwk_no_cleanup(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -440,12 +440,12 @@ def test_restart_ntwk_with_cleanup(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("restarting network with cleanup=True") @@ -463,12 +463,12 @@ def test_restart_ntwk_with_cleanup(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & 
primary)" ) for router in routers: self.assertEqual( @@ -490,7 +490,7 @@ def test_network_gc(self): # 4. stop the running user VM # 5. wait for network.gc time # 6. listRouters - # 7. start the routers MASTER and BACK + # 7. start the routers PRIMARY and BACK # 8. wait for network.gc time and listRouters # 9. delete the account @@ -577,12 +577,12 @@ def test_network_gc(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) self.debug("Stopping the user VM: %s" % virtual_machine.name) @@ -616,7 +616,7 @@ def test_network_gc(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) for router in routers: self.assertEqual( @@ -637,7 +637,7 @@ def test_network_gc(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) for router in routers: self.assertEqual( @@ -658,7 +658,7 @@ def test_network_gc(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) for router in routers: self.assertEqual( @@ -669,15 +669,15 @@ def test_network_gc(self): return @attr(tags=["advanced", "advancedns"], required_hardware="false") - def test_restart_network_with_destroyed_masterVR(self): - """Test restarting RvR network without cleanup after destroying master VR + def test_restart_network_with_destroyed_primaryVR(self): + """Test restarting RvR network without cleanup after destroying primary VR """ # Steps to validate # 1. 
createNetwork using network offering for redundant virtual router # 2. listRouters in above network # 3. deployVM in above user account in the created network - # 4. Destroy master VR + # 4. Destroy primary VR # 5. restartNetwork cleanup=false # 6. Verify RVR status after network restart @@ -741,46 +741,46 @@ def test_restart_network_with_destroyed_masterVR(self): self.assertEqual( validateList(routers)[0], PASS, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) - if routers[0].redundantstate == 'MASTER' and\ + if routers[0].redundantstate == 'PRIMARY' and\ routers[1].redundantstate == 'BACKUP': - master_router = routers[0] + primary_router = routers[0] backup_router = routers[1] - elif routers[1].redundantstate == 'MASTER' and \ + elif routers[1].redundantstate == 'PRIMARY' and \ routers[0].redundantstate == 'BACKUP': - master_router = routers[1] + primary_router = routers[1] backup_router = routers[0] else: self.fail("Both the routers in RVR are in BackupState - CLOUDSTACK-9015") Router.stop( self.apiclient, - id=master_router.id + id=primary_router.id ) Router.destroy( self.apiclient, - id=master_router.id + id=primary_router.id ) - masterVR = Router.list( + primaryVR = Router.list( self.apiclient, - id=master_router.id + id=primary_router.id ) - self.assertIsNone(masterVR, "Router is not destroyed") - new_master = Router.list( + self.assertIsNone(primaryVR, "Router is not destroyed") + new_primary = Router.list( self.apiclient, id=backup_router.id ) - self.assertEqual(validateList(new_master)[0], PASS, "Invalid response after vr destroy") + self.assertEqual(validateList(new_primary)[0], PASS, "Invalid response after vr destroy") self.assertEqual( - new_master[0].redundantstate, - "MASTER", - "Backup didn't switch to Master after destroying 
Master VR" + new_primary[0].redundantstate, + "PRIMARY", + "Backup didn't switch to Primary after destroying Primary VR" ) self.debug("restarting network with cleanup=False") @@ -798,12 +798,12 @@ def test_restart_network_with_destroyed_masterVR(self): self.assertEqual( validateList(routers)[0], PASS, - "list router should return Master and backup routers afrer network restart" + "list router should return Primary and backup routers afrer network restart" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & primary)" ) for router in routers: self.assertEqual( @@ -811,12 +811,12 @@ def test_restart_network_with_destroyed_masterVR(self): "Running", "Router state should be running" ) - if routers[0].redundantstate == 'MASTER' and\ + if routers[0].redundantstate == 'PRIMARY' and\ routers[1].redundantstate == 'BACKUP': - self.debug("Found master and backup VRs after network restart") + self.debug("Found primary and backup VRs after network restart") elif routers[0].redundantstate == 'BACKUP' and \ - routers[1].redundantstate == 'MASTER': - self.debug("Found master and backup routers") + routers[1].redundantstate == 'PRIMARY': + self.debug("Found primary and backup routers") else: self.fail("RVR is not in proper start after network restart") return diff --git a/test/integration/component/test_redundant_router_services.py b/test/integration/component/test_redundant_router_services.py index ba282b288343..02d4d72682f7 100644 --- a/test/integration/component/test_redundant_router_services.py +++ b/test/integration/component/test_redundant_router_services.py @@ -209,7 +209,7 @@ def test_enableVPNOverRvR(self): # 1. listNetworks should show the created network in allocated state # 2. listRouters returns no running routers # 3. VMs should be deployed and in Running state - # 4. should list MASTER and BACKUP routers + # 4. should list PRIMARY and BACKUP routers # 5. 
listPublicIpAddresses for networkid should show acquired IP addr # 6. listRemoteAccessVpns for the network associated should show VPN # created @@ -299,12 +299,12 @@ def test_enableVPNOverRvR(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) self.debug("Associating public IP for network: %s" % network.name) diff --git a/test/integration/component/test_redundant_router_upgrades.py b/test/integration/component/test_redundant_router_upgrades.py index 6a0efb02014f..4e8454762dec 100644 --- a/test/integration/component/test_redundant_router_upgrades.py +++ b/test/integration/component/test_redundant_router_upgrades.py @@ -228,7 +228,7 @@ def test_upgradeVR_to_redundantVR(self): # one Router running for this network # 3. listNetworkOfferings should show craeted offering for RvR # 4. listNetworks shows the network still successfully implemented - # 5. listRouters shows two routers Up and Running (MASTER and BACKUP) + # 5. listRouters shows two routers Up and Running (PRIMARY and BACKUP) network_offerings = NetworkOffering.list( self.apiclient, @@ -349,7 +349,7 @@ def test_upgradeVR_to_redundantVR(self): self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (MASTER & BACKUP)" + "Length of the list router should be 2 (PRIMARY & BACKUP)" ) return @@ -372,7 +372,7 @@ def test_downgradeRvR_to_VR(self): # 1. listNetworkOfferings should show craeted offering for RvR # 2. listNetworks should show the created network in allocated state # 3. VM should be deployed and in Running state and there should be - # two routers (MASTER and BACKUP) for this network + # two routers (PRIMARY and BACKUP) for this network # 4. listNetworkOfferings should show craeted offering for VR # 5. 
listNetworks shows the network still successfully implemented # 6. listRouters shows only one router for this network in Running @@ -456,7 +456,7 @@ def test_downgradeRvR_to_VR(self): self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (MASTER & BACKUP)" + "Length of the list router should be 2 (PRIMARY & BACKUP)" ) network_offerings = NetworkOffering.list( diff --git a/test/integration/component/test_volumes.py b/test/integration/component/test_volumes.py index 9662f6aa6959..8f11473089e3 100644 --- a/test/integration/component/test_volumes.py +++ b/test/integration/component/test_volumes.py @@ -1191,7 +1191,7 @@ def test_create_volume_under_domain(self): 2. Create a user within this domain 3. As user in step 2. create a volume with standard disk offering 4. Ensure the volume is created in the domain and available to the - user in his listVolumes call + user in their listVolumes call """ dom = Domain.create( self.apiclient, diff --git a/test/integration/plugins/test_nicira_controller.py b/test/integration/plugins/test_nicira_controller.py index 9524a516fc14..aa3786679a51 100644 --- a/test/integration/plugins/test_nicira_controller.py +++ b/test/integration/plugins/test_nicira_controller.py @@ -127,13 +127,13 @@ def setUpClass(cls): 'password': 'admin' } - cls.nicira_master_controller = cls.determine_master_controller( + cls.nicira_primary_controller = cls.determine_primary_controller( cls.nicira_hosts, cls.nicira_credentials ) cls.transport_zone_uuid = cls.get_transport_zone_from_controller( - cls.nicira_master_controller, + cls.nicira_primary_controller, cls.nicira_credentials ) @@ -213,7 +213,7 @@ def tearDown(self): @classmethod - def determine_master_controller(cls, hosts, credentials): + def determine_primary_controller(cls, hosts, credentials): for host in hosts: r1 = requests.post("https://%s/ws.v1/login" % host, credentials, verify=False) r2 = requests.get("https://%s/ws.v1/control-cluster/status" % host, verify=False, 
cookies=r1.cookies) @@ -260,12 +260,12 @@ def get_nicira_enabled_physical_network_id(cls, physical_networks): return PhysicalNetwork.list(cls.api_client, name=nicira_physical_network_name)[0].id - def determine_slave_conroller(self, hosts, master_controller): - slaves = [ s for s in hosts if s != master_controller ] - if len(slaves) > 0: - return slaves[0] + def determine_secondary_conroller(self, hosts, primary_controller): + secondary = [ s for s in hosts if s != primary_controller ] + if len(secondary) > 0: + return secondary[0] else: - raise Exception("None of the supplied hosts (%s) is a Nicira slave" % hosts) + raise Exception("None of the supplied hosts (%s) is a Nicira secondary" % hosts) def add_nicira_device(self, hostname, l2gatewayserviceuuid=None): @@ -404,29 +404,29 @@ def get_hosts(self): ) - def get_master_router(self, routers): - master = [r for r in routers if r.redundantstate == 'MASTER'] - self.logger.debug("Found %s master router(s): %s" % (master.size(), master)) - return master[0] + def get_primary_router(self, routers): + primary = [r for r in routers if r.redundantstate == 'PRIMARY'] + self.logger.debug("Found %s primary router(s): %s" % (primary.size(), primary)) + return primary[0] def distribute_vm_and_routers_by_hosts(self, virtual_machine, routers): if len(routers) > 1: router = self.get_router(routers) - self.logger.debug("Master Router VM is %s" % router) + self.logger.debug("Primary Router VM is %s" % router) else: router = routers[0] if router.hostid == virtual_machine.hostid: - self.logger.debug("Master Router VM is on the same host as VM") + self.logger.debug("Primary Router VM is on the same host as VM") host = findSuitableHostForMigration(self.api_client, router.id) if host is not None: migrate_router(self.api_client, router.id, host.id) - self.logger.debug("Migrated Master Router VM to host %s" % host.name) + self.logger.debug("Migrated Primary Router VM to host %s" % host.name) else: - self.fail('No suitable host to migrate 
Master Router VM to') + self.fail('No suitable host to migrate Primary Router VM to') else: - self.logger.debug("Master Router VM is not on the same host as VM: %s, %s" % (router.hostid, virtual_machine.hostid)) + self.logger.debug("Primary Router VM is not on the same host as VM: %s, %s" % (router.hostid, virtual_machine.hostid)) def acquire_publicip(self, network): @@ -459,7 +459,7 @@ def create_natrule(self, vm, public_ip, network): @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true") def test_01_nicira_controller(self): - self.add_nicira_device(self.nicira_master_controller) + self.add_nicira_device(self.nicira_primary_controller) network = self.create_guest_isolated_network() virtual_machine = self.create_virtual_machine(network) @@ -478,19 +478,19 @@ def test_01_nicira_controller(self): @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true") def test_02_nicira_controller_redirect(self): """ - Nicira clusters will redirect clients (in this case ACS) to the master node. + Nicira clusters will redirect clients (in this case ACS) to the primary node. This test assumes that a Nicira cluster is present and configured properly, and that it has at least two controller nodes. The test will check that ASC follows redirects by: - - adding a Nicira Nvp device that points to one of the cluster's slave controllers, + - adding a Nicira Nvp device that points to one of the cluster's secondary controllers, - create a VM in a Nicira backed network - If all is well, no matter what controller is specified (slaves or master), the vm (and respective router VM) + If all is well, no matter what controller is specified (secondary or primary), the vm (and respective router VM) should be created without issues. 
""" - nicira_slave = self.determine_slave_conroller(self.nicira_hosts, self.nicira_master_controller) - self.logger.debug("Nicira slave controller is: %s " % nicira_slave) + nicira_secondary = self.determine_secondary_conroller(self.nicira_hosts, self.nicira_primary_controller) + self.logger.debug("Nicira secondary controller is: %s " % nicira_secondary) - self.add_nicira_device(nicira_slave) + self.add_nicira_device(nicira_secondary) network = self.create_guest_isolated_network() virtual_machine = self.create_virtual_machine(network) @@ -508,7 +508,7 @@ def test_02_nicira_controller_redirect(self): @attr(tags = ["advanced", "smoke", "nicira"], required_hardware="true") def test_03_nicira_tunnel_guest_network(self): - self.add_nicira_device(self.nicira_master_controller) + self.add_nicira_device(self.nicira_primary_controller) network = self.create_guest_isolated_network() virtual_machine = self.create_virtual_machine(network) public_ip = self.acquire_publicip(network) @@ -548,7 +548,7 @@ def test_04_nicira_shared_networks_numerical_vlanid(self): CASE 1) Numerical VLAN_ID provided in network creation """ self.debug("Starting test case 1 for Shared Networks") - self.add_nicira_device(self.nicira_master_controller, self.l2gatewayserviceuuid) + self.add_nicira_device(self.nicira_primary_controller, self.l2gatewayserviceuuid) network = self.create_guest_shared_network_numerical_vlanid() virtual_machine = self.create_virtual_machine_shared_networks(network) @@ -569,7 +569,7 @@ def test_05_nicira_shared_networks_lrouter_uuid_vlan_id(self): CASE 2) Logical Router's UUID as VLAN_ID provided in network creation """ self.debug("Starting test case 2 for Shared Networks") - self.add_nicira_device(self.nicira_master_controller, self.l2gatewayserviceuuid) + self.add_nicira_device(self.nicira_primary_controller, self.l2gatewayserviceuuid) network = self.create_guest_shared_network_uuid_vlanid() virtual_machine = self.create_virtual_machine_shared_networks(network) diff --git 
a/test/integration/smoke/test_kubernetes_clusters.py b/test/integration/smoke/test_kubernetes_clusters.py index 5ec2d49b039e..d78f3a0afb3b 100644 --- a/test/integration/smoke/test_kubernetes_clusters.py +++ b/test/integration/smoke/test_kubernetes_clusters.py @@ -573,13 +573,13 @@ def listKubernetesCluster(self, cluster_id = None): return clusterResponse[0] return clusterResponse - def createKubernetesCluster(self, name, version_id, size=1, master_nodes=1): + def createKubernetesCluster(self, name, version_id, size=1, control_nodes=1): createKubernetesClusterCmd = createKubernetesCluster.createKubernetesClusterCmd() createKubernetesClusterCmd.name = name createKubernetesClusterCmd.description = name + "-description" createKubernetesClusterCmd.kubernetesversionid = version_id createKubernetesClusterCmd.size = size - createKubernetesClusterCmd.masternodes = master_nodes + createKubernetesClusterCmd.controlnodes = control_nodes createKubernetesClusterCmd.serviceofferingid = self.cks_service_offering.id createKubernetesClusterCmd.zoneid = self.zone.id createKubernetesClusterCmd.noderootdisksize = 10 @@ -622,10 +622,10 @@ def scaleKubernetesCluster(self, cluster_id, size): response = self.apiclient.scaleKubernetesCluster(scaleKubernetesClusterCmd) return response - def getValidKubernetesCluster(self, size=1, master_nodes=1): + def getValidKubernetesCluster(self, size=1, control_nodes=1): cluster = k8s_cluster version = self.kubernetes_version_2 - if master_nodes != 1: + if control_nodes != 1: version = self.kubernetes_version_3 valid = True if cluster == None: @@ -642,7 +642,7 @@ def getValidKubernetesCluster(self, size=1, master_nodes=1): self.debug("Existing cluster, k8s_cluster ID: %s not returned by list API" % cluster_id) if valid == True: try: - self.verifyKubernetesCluster(cluster, cluster.name, None, size, master_nodes) + self.verifyKubernetesCluster(cluster, cluster.name, None, size, control_nodes) self.debug("Existing Kubernetes cluster available with name %s" 
% cluster.name) except AssertionError as error: valid = False @@ -652,15 +652,15 @@ def getValidKubernetesCluster(self, size=1, master_nodes=1): self.debug("Creating for Kubernetes cluster with name %s" % name) try: self.deleteAllLeftoverClusters() - cluster = self.createKubernetesCluster(name, version.id, size, master_nodes) - self.verifyKubernetesCluster(cluster, name, version.id, size, master_nodes) + cluster = self.createKubernetesCluster(name, version.id, size, control_nodes) + self.verifyKubernetesCluster(cluster, name, version.id, size, control_nodes) except Exception as ex: self.fail("Kubernetes cluster deployment failed: %s" % ex) except AssertionError as err: self.fail("Kubernetes cluster deployment failed during cluster verification: %s" % err) return cluster - def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, master_nodes=1): + def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size=1, control_nodes=1): """Check if Kubernetes cluster is valid""" self.verifyKubernetesClusterState(cluster_response, 'Running') @@ -681,7 +681,7 @@ def verifyKubernetesCluster(self, cluster_response, name, version_id=None, size= "Check KubernetesCluster zone {}, {}".format(cluster_response.zoneid, self.zone.id) ) - self.verifyKubernetesClusterSize(cluster_response, size, master_nodes) + self.verifyKubernetesClusterSize(cluster_response, size, control_nodes) db_cluster_name = self.dbclient.execute("select name from kubernetes_cluster where uuid = '%s';" % cluster_response.id)[0][0] @@ -709,7 +709,7 @@ def verifyKubernetesClusterVersion(self, cluster_response, version_id): "Check KubernetesCluster version {}, {}".format(cluster_response.kubernetesversionid, version_id) ) - def verifyKubernetesClusterSize(self, cluster_response, size=1, master_nodes=1): + def verifyKubernetesClusterSize(self, cluster_response, size=1, control_nodes=1): """Check if Kubernetes cluster node sizes are valid""" self.assertEqual( @@ -719,9 
+719,9 @@ def verifyKubernetesClusterSize(self, cluster_response, size=1, master_nodes=1): ) self.assertEqual( - cluster_response.masternodes, - master_nodes, - "Check KubernetesCluster master nodes {}, {}".format(cluster_response.masternodes, master_nodes) + cluster_response.controlnodes, + control_nodes, + "Check KubernetesCluster control nodes {}, {}".format(cluster_response.controlnodes, control_nodes) ) def verifyKubernetesClusterUpgrade(self, cluster_response, version_id): @@ -730,11 +730,11 @@ def verifyKubernetesClusterUpgrade(self, cluster_response, version_id): self.verifyKubernetesClusterState(cluster_response, 'Running') self.verifyKubernetesClusterVersion(cluster_response, version_id) - def verifyKubernetesClusterScale(self, cluster_response, size=1, master_nodes=1): + def verifyKubernetesClusterScale(self, cluster_response, size=1, control_nodes=1): """Check if Kubernetes cluster state and node sizes are valid after upgrade""" self.verifyKubernetesClusterState(cluster_response, 'Running') - self.verifyKubernetesClusterSize(cluster_response, size, master_nodes) + self.verifyKubernetesClusterSize(cluster_response, size, control_nodes) def stopAndVerifyKubernetesCluster(self, cluster_id): """Stop Kubernetes cluster and check if it is really stopped""" diff --git a/test/integration/smoke/test_privategw_acl.py b/test/integration/smoke/test_privategw_acl.py index 1111a488c909..da0ae6a0020a 100644 --- a/test/integration/smoke/test_privategw_acl.py +++ b/test/integration/smoke/test_privategw_acl.py @@ -454,7 +454,7 @@ def performPrivateGWInterfaceTests(self, vpc_off): self.check_pvt_gw_connectivity(vm1, public_ip_1, [vm2.nic[0].ipaddress, vm3.nic[0].ipaddress, vm4.nic[0].ipaddress]) self.check_pvt_gw_connectivity(vm2, public_ip_2, [vm2.nic[0].ipaddress, vm3.nic[0].ipaddress, vm4.nic[0].ipaddress]) - self.stop_router_by_type("MASTER") + self.stop_router_by_type("PRIMARY") self.check_routers_state() self.check_private_gateway_interfaces() @@ -852,10 +852,10 @@ 
def check_private_gateway_interfaces(self, status_to_check = "UP"): else: self.assertTrue(check_state == 1, "Routers private gateway interface should should have been removed!") - def check_routers_state(self, status_to_check="MASTER", expected_count=1): + def check_routers_state(self, status_to_check="PRIMARY", expected_count=1): routers = self.query_routers() - vals = ["MASTER", "BACKUP", "UNKNOWN"] + vals = ["PRIMARY", "BACKUP", "UNKNOWN"] cnts = [0, 0, 0] result = "UNKNOWN" diff --git a/test/integration/smoke/test_routers_network_ops.py b/test/integration/smoke/test_routers_network_ops.py index 2f122a281e22..cc1774cfc328 100644 --- a/test/integration/smoke/test_routers_network_ops.py +++ b/test/integration/smoke/test_routers_network_ops.py @@ -233,12 +233,12 @@ def test_01_RVR_Network_FW_PF_SSH_default_routes_egress_true(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) public_ips = list_publicIP( @@ -398,12 +398,12 @@ def test_02_RVR_Network_FW_PF_SSH_default_routes_egress_false(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list router should be 2 (Backup & Primary)" ) public_ips = list_publicIP( @@ -573,15 +573,15 @@ def test_03_RVR_Network_check_router_state(self): self.assertEqual( isinstance(routers, list), True, - "list router should return Master and backup routers" + "list router should return Primary and backup routers" ) self.assertEqual( len(routers), 2, - "Length of the list router should be 2 (Backup & master)" + "Length of the list 
router should be 2 (Backup & Primary)" ) - vals = ["MASTER", "BACKUP", "UNKNOWN"] + vals = ["PRIMARY", "BACKUP", "UNKNOWN"] cnts = [0, 0, 0] result = "UNKNOWN" @@ -632,8 +632,8 @@ def test_03_RVR_Network_check_router_state(self): if result.count(vals[0]) == 1: cnts[vals.index(vals[0])] += 1 - if cnts[vals.index('MASTER')] != 1: - self.fail("No Master or too many master routers found %s" % cnts[vals.index('MASTER')]) + if cnts[vals.index('PRIMARY')] != 1: + self.fail("No Primary or too many primary routers found %s" % cnts[vals.index('PRIMARY')]) return diff --git a/test/integration/smoke/test_vpc_redundant.py b/test/integration/smoke/test_vpc_redundant.py index 9144985d6ce1..d7dadb872b78 100644 --- a/test/integration/smoke/test_vpc_redundant.py +++ b/test/integration/smoke/test_vpc_redundant.py @@ -300,11 +300,11 @@ def query_routers(self, count=2, showall=False): "Check that %s routers were indeed created" % count) def wait_for_vrrp(self): - # Wait until 3*advert_int+skew time to get one of the routers as MASTER + # Wait until 3*advert_int+skew time to get one of the routers as PRIMARY time.sleep(3 * self.advert_int + 5) - def check_routers_state(self,count=2, status_to_check="MASTER", expected_count=1, showall=False): - vals = ["MASTER", "BACKUP", "UNKNOWN", "FAULT"] + def check_routers_state(self,count=2, status_to_check="PRIMARY", expected_count=1, showall=False): + vals = ["PRIMARY", "BACKUP", "UNKNOWN", "FAULT"] cnts = [0, 0, 0, 0] self.wait_for_vrrp() @@ -543,7 +543,7 @@ def test_01_create_redundant_VPC_2tiers_4VMs_4IPs_4PF_ACL(self): self.add_nat_rules() self.do_vpc_test(False) - self.stop_router_by_type("MASTER") + self.stop_router_by_type("PRIMARY") self.check_routers_state(1) self.do_vpc_test(False) @@ -578,11 +578,11 @@ def test_03_create_redundant_VPC_1tier_2VMs_2IPs_2PF_ACL_reboot_routers(self): self.add_nat_rules() self.do_vpc_test(False) - self.reboot_router_by_type("MASTER") + self.reboot_router_by_type("PRIMARY") self.check_routers_state() 
self.do_vpc_test(False) - self.reboot_router_by_type("MASTER") + self.reboot_router_by_type("PRIMARY") self.check_routers_state() self.do_vpc_test(False) @@ -615,7 +615,7 @@ def test_04_rvpc_network_garbage_collector_nics(self): # Router will be in FAULT state, i.e. keepalived is stopped self.check_routers_state(status_to_check="FAULT", expected_count=2) self.start_vm() - self.check_routers_state(status_to_check="MASTER") + self.check_routers_state(status_to_check="PRIMARY") @attr(tags=["advanced", "intervlan"], required_hardware="true") def test_05_rvpc_multi_tiers(self): @@ -636,7 +636,7 @@ def test_05_rvpc_multi_tiers(self): network.get_net().delete(self.apiclient) self.networks.remove(network) - self.check_routers_state(status_to_check="MASTER") + self.check_routers_state(status_to_check="PRIMARY") self.do_vpc_test(False) def destroy_vm(self, network): diff --git a/tools/apidoc/generatecommand.xsl b/tools/apidoc/generatecommand.xsl index 11530f219cfe..8b53f9fe19a7 100644 --- a/tools/apidoc/generatecommand.xsl +++ b/tools/apidoc/generatecommand.xsl @@ -40,7 +40,7 @@ version="1.0"> -
+
@@ -147,7 +147,7 @@ version="1.0">